comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
`.block()` in `Mono` and `Flux` is annotated as `nullable` so Spotbugs will flags any variables instantiated using `.block()` results in it being flagged as potential null value usage. Based on the calling pattern here, and below, would it be possible to defer the `.block()` until later so this check isn't needed? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Pure pass-through: forwards to the overload that also accepts OpenOptions, passing
    // null so the default file-open behavior (create new, fail if exists) is used.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the caller-supplied transfer options (applying SDK defaults) and funnel all
    // of the flat parameters into the single options-bag overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(
            ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions)))
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block for its completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegates with no snapshot option, no request conditions, no timeout, and an empty
    // context; the Response is intentionally discarded since this overload is void.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and wait for it, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: no request conditions, no timeout, empty context;
    // unwraps the Response to return just the properties value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions, no timeout, empty context;
    // the Response is intentionally discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers asynchronously and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions, no timeout, empty context;
    // the Response is intentionally discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout, empty context; unwrap to the tag map.
    Response<Map<String, String>> response = this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Convenience overload: wraps the tags in an options bag with no timeout and an empty
    // context; the Response is intentionally discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, no request conditions, no timeout, empty context;
    // unwraps the Response to return the snapshot-scoped client directly.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, wrapping the returned async client in a
    // synchronous BlobClientBase before blocking for the result.
    Mono<Response<BlobClientBase>> snapshot = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshot, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority, no lease, no timeout, empty context;
    // the Response is intentionally discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the flat parameters into the options bag and delegate to that overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Set the tier asynchronously and block, bounded by the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload: no timeout, empty context; the Response is intentionally
    // discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout, empty context; unwraps the Response value.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch account info asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Pure delegation: SAS signing is implemented entirely by the wrapped async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Pure delegation: forwards the explicit account name and context to the async client's
    // SAS generation.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Pure delegation: service SAS generation is implemented by the wrapped async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Pure delegation: forwards the context to the async client's SAS generation.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Wrap the raw expression in an options bag and unwrap the resulting Response.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Mono.block() is @Nullable (an empty Mono completes with null), so the result is
    // guarded before use to satisfy static analysis and fail fast with a clear message.
    BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
    if (response == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Re-wrap the async body as a blocking InputStream while preserving the HTTP
    // request/status/headers metadata from the original response.
    return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Convenience overload: bundles the stream and expression into an options bag with no
    // timeout and an empty context; the response is intentionally discarded.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options bag and its output stream are required up front.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // Drain the async body into the caller's OutputStream. reduce() threads the stream
        // through each buffer in order, so writes happen sequentially.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate through the reactive
                // pipeline; Exceptions.propagate preserves the cause.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions, no timeout, empty context;
    // unwraps the Response to return the applied policy.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the immutability policy asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Convenience overload: no timeout, empty context. The result is a Response<Void>, so
    // the previous trailing .getValue() was a dead call and has been removed; failures are
    // still surfaced as exceptions from the blocking call.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delete the immutability policy asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Places or removes a legal hold on the blob.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold}
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    Response<BlobLegalHoldResult> holdResponse = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return holdResponse.getValue();
}
/**
 * Places or removes a legal hold on the blob, returning the full HTTP response.
 * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse}
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Logger scoped to this class; used to record and wrap exceptions raised by sync wrappers.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client that performs all service calls; the sync methods in this class block on it.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Returns a new {@link BlobClientBase} bound to the given {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Returns a new {@link BlobClientBase} bound to the given {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Returns a new {@link BlobClientBase} that uses the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Returns a new {@link BlobClientBase} that uses the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase keyedAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(keyedAsyncClient);
}
/**
 * Returns the URL of the storage account this blob belongs to.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return this.client.getAccountUrl();
}
/**
 * Returns the full URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}
/**
 * Returns the name of the storage account associated with this resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}
/**
 * Returns the name of the container holding this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}
/**
 * Builds a sync client pointing at this blob's parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Reuse the async client's pre-configured container builder so pipeline settings carry over.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Returns the decoded name of this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}
/**
 * Returns the {@link HttpPipeline} backing this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}
/**
 * Returns the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}
/**
 * Returns the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return this.client.getEncryptionScope();
}
/**
 * Returns the service version this client targets.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}
/**
 * Returns the snapshot identifier of this blob resource, if any.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}
/**
 * Returns the version identifier of this blob resource, if any.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return this.client.getVersionId();
}
/**
 * Indicates whether this client points at a blob snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens an input stream over the entire blob, using default range and request conditions.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Delegate with no range and no access conditions.
    return openInputStream(null, null);
}
/**
 * Opens an input stream over the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): this Javadoc documents openInputStream(BlobInputStreamOptions), but no such method follows it
// here — the implementation appears to be missing from this file and should be restored (ideally composing the
// reactive chain and deferring the single .block() to the end, so nullable .block() results need no null checks).
/**
 * Checks whether the blob represented by this client exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    return this.existsWithResponse(null, Context.NONE).getValue();
}
/**
 * Checks whether the blob represented by this client exists in the cloud, returning the full HTTP response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(this.client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional parameters (metadata, tier, priority, conditions) are left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the generic RequestConditions into blob-specific source conditions before delegating.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Expose the async poller through its synchronous adapter.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata,
 * returning the full HTTP response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abortMono = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> copyResponse =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return copyResponse.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(this.client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p>This method will be deprecated in the future; prefer {@code downloadStream}.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    this.downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options, no conditions, no range-MD5, no timeout.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob into memory. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p>This method supports downloads up to 2GB of data; use a streaming download for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> contentMono = this.client.downloadContent();
    return blockWithOptionalTimeout(contentMono, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p>This method will be deprecated in the future; prefer {@code downloadStreamWithResponse}.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold each downloaded ByteBuffer into the caller's stream, then surface the original response headers.
    Mono<BlobDownloadResponse> downloadMono = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(asyncResponse -> asyncResponse.getValue()
            .reduce(stream, (target, chunk) -> {
                try {
                    target.write(FluxUtil.byteBufferToArray(chunk));
                    return target;
                } catch (IOException ex) {
                    // Propagate as unchecked so the reactive pipeline surfaces it to the blocking caller.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            })
            .thenReturn(new BlobDownloadResponse(asyncResponse)));
    return blockWithOptionalTimeout(downloadMono, timeout);
}
/**
 * Downloads a range of bytes from a blob into memory. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * <p>This method supports downloads up to 2GB of data; use a streaming download for larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the streamed body into BinaryData, carrying over the original request/headers metadata.
    Mono<BlobDownloadContentResponse> downloadMono = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(streamResponse -> BinaryData.fromFlux(streamResponse.getValue())
            .map(body -> new BlobDownloadContentAsyncResponse(
                streamResponse.getRequest(), streamResponse.getStatusCode(),
                streamResponse.getHeaders(), body,
                streamResponse.getDeserializedHeaders())))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(downloadMono, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (overwrite) {
        // Truncate any pre-existing file and open it for both reading and writing.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // null selects the default behavior: fail if the file already exists.
        openOptions = null;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist; if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with default open options (fail if the file exists).
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist; if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the Azure Docs.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common representation, applying defaults.
    com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete with service defaults: no snapshot option, no access conditions, empty context.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // No request conditions or timeout; unwrap just the value from the full response.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // No request conditions or timeout; response is discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // No request conditions or timeout; response is discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Default options, no timeout; unwrap just the tag map from the full response.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // No timeout; response is discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // No metadata, conditions, or timeout; unwrap the snapshot client from the full response.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before handing it back.
    Mono<Response<BlobClientBase>> response =
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // No rehydrate priority, lease, or timeout; response is discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Funnel the individual parameters through the options-bag overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // No timeout; response is discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // No timeout; unwrap just the account info from the full response.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Pure delegation; SAS computation is local (no service call) and lives in the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Pure delegation; SAS computation is local (no service call) and lives in the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Pure delegation; SAS computation is local (no service call) and lives in the async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Pure delegation; SAS computation is local (no service call) and lives in the async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default query options and unwrap the stream from the full response.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Mono.block() is nullable, so guard before dereferencing the query response.
    BlobQueryAsyncResponse queryResponse = client.queryWithResponse(queryOptions).block();
    if (queryResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the async body as a blocking InputStream while preserving the HTTP metadata.
    return new ResponseBase<>(queryResponse.getRequest(), queryResponse.getStatusCode(),
        queryResponse.getHeaders(), new FluxInputStream(queryResponse.getValue()),
        queryResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Wrap expression + sink in query options; no timeout; response is discarded.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Stream every result buffer into the caller-supplied OutputStream, then surface the response metadata.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Propagate as unchecked so the reactive pipeline terminates with the I/O failure.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // No request conditions or timeout; unwrap the resulting policy from the full response.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Response<Void> carries no value, so the previous getValue() call on the discarded
    // response was dead code; just issue the request with defaults.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // No timeout; unwrap the legal-hold result from the full response.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Delegate to the async client and block once, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
Would be great to avoid it altogether. However, it would be great to know the pattern to suppress warnings if we have good reason to do so (like writing dead code in this code). In .NET we use [this](https://github.com/Azure/azure-sdk-for-net/search?q=%22%23pragma+warning+disable%22) to accomplish that. I guess something similar should be possible with java's toolchain? | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
// Class-scoped logger used to log-and-throw mapped exceptions from the sync wrappers.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; the synchronous methods in this class delegate to it and block.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // Assumed non-null; presumably guaranteed by SpecializedBlobClientBuilder — TODO confirm.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a fresh sync facade.
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a fresh sync facade.
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Build a sync client over the async client bound to the given encryption scope.
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Build a sync client over the async client bound to the given customer-provided key.
    BlobAsyncClientBase cpkAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Pure delegation to the wrapped async client.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // The async client knows its parent container; build the sync wrapper from its builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Pure delegation to the wrapped async client.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private on purpose; pure delegation to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Stream the entire blob with no access conditions.
    BlobRange fullBlob = null;
    BlobRequestConditions noConditions = null;
    return openInputStream(fullBlob, noConditions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Translate the legacy parameter list into the options-bag overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // No timeout, default context; unwrap the value from the full response.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Simple overload: no metadata, tier, rehydrate priority, or access conditions.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Assemble the options bag once, then dispatch to the options-based overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling machinery; surface its synchronous poller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout; the Response<Void> is intentionally discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort, honoring the optional timeout.
    Mono<Response<Void>> abortMono = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortMono, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Default everything except the source URL; unwrap the copy id from the response.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Assemble the options bag once, then dispatch to the options-based overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Retained for source compatibility; downloadStream is the preferred entry point.
    this.downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options, no conditions, no range-MD5 validation.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Null timeout: block until the async download completes.
    return blockWithOptionalTimeout(this.client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Thin forwarding shim kept for source compatibility with older releases.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Fail fast on a null destination rather than from inside the reactive pipeline.
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // reduce() folds each downloaded ByteBuffer into the caller's stream; Reactor emits the
        // buffers sequentially, so writes occur in blob order.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Downloads the whole blob (null range) and buffers it into BinaryData in memory; the javadoc
    // advertises a 2GB ceiling for this path.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            // Collect the body flux into BinaryData, then rebuild the response around it,
            // preserving the request, status code, headers, and deserialized headers.
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: fail if the destination file already exists.
    return this.downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // Null openOptions keeps the downstream default (create-new, fail on existing file);
    // overwrite switches to create/truncate read-write semantics.
    Set<OpenOption> openOptions;
    if (!overwrite) {
        openOptions = null;
    } else {
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Null openOptions defers to the downstream overload's default file-open semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common representation, then
    // assemble the options bag for the options-based overload.
    com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block for up to the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate to the response-returning overload with default options and no conditions.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and synchronously wait, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Fetch with default conditions, then unwrap the value from the full response.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async properties call, raising if the optional timeout elapses.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate to the response-returning overload with no request conditions or timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async header update, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate to the response-returning overload with no request conditions or timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Block on the async metadata update, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Use default options, then unwrap the tag map from the full response.
    Response<Map<String, String>> response =
        this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Block on the async tags retrieval, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Wrap the raw map in an options object and delegate to the response-returning overload.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Block on the async tag update, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Snapshot with no metadata or conditions, then unwrap the snapshot client.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, wrap the returned async client in a sync client,
    // then block with the optional timeout.
    Mono<Response<BlobClientBase>> response = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse ->
            new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate to the response-returning overload with no priority, lease, or timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual arguments into an options object for the options-based overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Block on the async tier change, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate to the response-returning overload with no timeout.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Block on the async undelete, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Fetch with no timeout, then unwrap the account info from the full response.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Block on the async account-info call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is delegated directly to the wrapped async client; no service call involved.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is delegated directly to the wrapped async client; no service call involved.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is delegated directly to the wrapped async client; no service call involved.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is delegated directly to the wrapped async client; no service call involved.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Build default query options around the expression, then unwrap the stream from the response.
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Build the final response inside the reactive chain and defer block() to the very end so
    // only the terminal value is nullable; this removes the intermediate nullable local that
    // static analysis (SpotBugs) flags for Mono.block() results.
    Mono<Response<InputStream>> responseMono = client.queryWithResponse(queryOptions)
        .map(queryResponse -> new ResponseBase<>(queryResponse.getRequest(),
            queryResponse.getStatusCode(), queryResponse.getHeaders(),
            new FluxInputStream(queryResponse.getValue()), queryResponse.getDeserializedHeaders()));
    Response<InputStream> response = responseMono.block();
    if (response == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return response;
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Bundle the stream and expression into options and delegate to the response-returning overload.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options object and its destination stream are required.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async response body into the caller-supplied OutputStream, writing each buffer
    // as it arrives, then surface the original response metadata once the body is drained.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Delegate to the response-returning overload and unwrap the resulting policy.
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async policy update, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Response<Void> carries no value, so calling getValue() on the result (as the previous
    // implementation did) was a no-op; just issue the request with no timeout.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Block on the async policy deletion, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Delegate to the response-returning overload and unwrap the legal hold result.
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Block on the async legal-hold update, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Logger scoped to this class for surfacing client-side errors.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every sync method on this class delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that all synchronous operations delegate to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Re-wrap the async client configured with the given encryption scope.
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Re-wrap the async client configured with the given customer-provided key.
    BlobAsyncClientBase keyedAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(keyedAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Pure delegation to the wrapped async client.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Pure delegation to the wrapped async client; the name is returned decoded.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; pure delegation to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Full-blob read: no explicit range and no access conditions.
    return this.openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Translate the legacy parameter pair into the options-bag overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the WithResponse overload with no timeout and unwrap the boolean payload.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Start the async existence check and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // No metadata, tier, rehydrate priority, or access conditions: defaults only.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Bundle the individual parameters into the options bag consumed by the canonical overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Obtain the async poller and expose its synchronous view.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, default context; response headers are discarded.
    this.abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Start the async abort and block for at most the optional timeout.
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Delegate to the WithResponse overload with all defaults and unwrap the copy ID.
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Collapse the individual parameters into the options bag consumed by the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Start the async copy and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backwards compatibility; downloadStream is the preferred entry point.
    this.downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Whole blob, no retry options, no access conditions, no range MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // No timeout is applied; block until the download completes.
    Mono<BinaryData> download = client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backwards compatibility; downloadStreamWithResponse is the preferred entry point.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold each emitted ByteBuffer into the caller's stream as it arrives; the writes happen on
    // the reactive pipeline's thread, then the HTTP response metadata is returned to the caller.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap as unchecked so the failure propagates through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Download the whole blob (null range, no range MD5), buffer the body into BinaryData, and
    // repackage the async response as its synchronous counterpart before blocking.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Do not overwrite: if the file already exists a FileAlreadyExistsException surfaces.
    return this.downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // A null option set keeps the default create-new semantics (fails if the file exists).
    Set<OpenOption> openOptions = overwrite ? overwriteOpenOptions() : null;
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}

// Open options that create the file if absent and truncate it if present.
private static Set<OpenOption> overwriteOpenOptions() {
    Set<OpenOption> openOptions = new HashSet<>();
    openOptions.add(StandardOpenOption.CREATE);
    openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    openOptions.add(StandardOpenOption.READ);
    openOptions.add(StandardOpenOption.WRITE);
    return openOptions;
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Null OpenOptions keeps the default create-new semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options, then hand everything to the options-bag overload.
    com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async file download and block for at most the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot option, no access conditions, no timeout; response headers are discarded.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Unwrap the value from the full *WithResponse variant using defaults.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions, no timeout.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions, no timeout.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Fetch with default options, then unwrap the tag map.
    Response<Map<String, String>> response = this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Wrap the raw map in the options type and use defaults for everything else.
    BlobSetTagsOptions options = new BlobSetTagsOptions(tags);
    this.setTagsWithResponse(options, null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Use default metadata/conditions/timeout, then unwrap the snapshot client.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the snapshot's async response value in a synchronous client before blocking.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue()))),
        timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no priority, no lease, no timeout.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the loose parameters into the options overload.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload with no timeout.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Unwrap the value from the full *WithResponse variant.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; forward straight to the async client's implementation.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; forward straight to the async client's implementation.
    return client.generateUserDelegationSas(
        blobServiceSasSignatureValues, userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; forward straight to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local; forward straight to the async client's implementation.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Build default query options around the expression and unwrap the stream.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // 'Mono.block()' is annotated @Nullable, so blocking an intermediate Mono into a local forces a
    // null check (and a SpotBugs finding). Instead, map inside the reactive chain and block exactly
    // once at the end, returning the terminal value directly — no intermediate null check needed.
    return client.queryWithResponse(queryOptions)
        .map(response -> (Response<InputStream>) new ResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            new FluxInputStream(response.getValue()), response.getDeserializedHeaders()))
        .block();
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Build default query options around the expression/stream pair; no timeout.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options object and its destination stream are required.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // Reduce the response body Flux into the caller's OutputStream, writing each buffer in
        // order; the stream itself is the accumulator so the reduction is sequential.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // IOException is checked and cannot escape the lambda directly; wrap it as
                // UncheckedIOException and propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Use default conditions/timeout, then unwrap the resulting policy.
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Dropped the redundant '.getValue()' — the Response<Void> carries no value and the result was
    // discarded anyway; the call added nothing.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // No timeout; unwrap the legal hold result from the response.
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Delegate to the async client and block with the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
}
Changed | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | (r, conditions) -> client.downloadWithResponse(r, null, conditions, false); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
    // Logger used to surface exceptions thrown by the synchronous wrappers below.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Underlying async client; every sync operation delegates to it and blocks on the result.
    private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
    protected BlobClientBase(BlobAsyncClientBase client) {
        // This sync client is a thin blocking facade over the supplied async client.
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
    public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
        // Wrap the scope-bound async client in a new sync client.
        return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
    }
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
    public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
        // Wrap the CPK-bound async client in a new sync client.
        return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
    }
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
    public String getAccountUrl() {
        // Delegate straight to the wrapped async client.
        return client.getAccountUrl();
    }
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
    public String getBlobUrl() {
        // Delegate straight to the wrapped async client.
        return client.getBlobUrl();
    }
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
    public String getAccountName() {
        // Delegate straight to the wrapped async client.
        return client.getAccountName();
    }
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
    public final String getContainerName() {
        // Delegate straight to the wrapped async client.
        return client.getContainerName();
    }
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
    public BlobContainerClient getContainerClient() {
        // Build a sync container client from the async client's pre-configured container builder.
        return client.getContainerClientBuilder().buildClient();
    }
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
    public final String getBlobName() {
        // Delegate straight to the wrapped async client.
        return client.getBlobName();
    }
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
    public HttpPipeline getHttpPipeline() {
        // Delegate straight to the wrapped async client.
        return client.getHttpPipeline();
    }
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
    public CpkInfo getCustomerProvidedKey() {
        // Delegate straight to the wrapped async client.
        return client.getCustomerProvidedKey();
    }
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
    String getEncryptionScope() {
        // Package-private accessor; delegates straight to the wrapped async client.
        return client.getEncryptionScope();
    }
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
    public BlobServiceVersion getServiceVersion() {
        // Delegate straight to the wrapped async client.
        return client.getServiceVersion();
    }
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
    public String getSnapshotId() {
        // Delegate straight to the wrapped async client.
        return client.getSnapshotId();
    }
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
    public String getVersionId() {
        // Delegate straight to the wrapped async client.
        return client.getVersionId();
    }
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
    public boolean isSnapshot() {
        // Delegate straight to the wrapped async client.
        return client.isSnapshot();
    }
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
    public final BlobInputStream openInputStream() {
        // No range or request conditions: stream the whole blob with defaults.
        return openInputStream(null, null);
    }
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
        RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
        BlobRequestConditions destRequestConditions, Duration pollInterval) {
        // Fold the loose parameters into a BlobBeginCopyOptions bag; the generic source
        // RequestConditions are converted to blob-specific conditions first.
        return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
            .setRehydratePriority(priority).setSourceRequestConditions(
                ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
            .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
    }
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
        // Start the async long-running copy and expose its synchronous poller.
        return client.beginCopy(options).getSyncPoller();
    }
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void abortCopyFromUrl(String copyId) {
        // No lease, no timeout, empty context; response is intentionally discarded.
        abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
    }
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public String copyFromUrl(String copySource) {
        // All-defaults delegation; unwraps the copy ID from the full response.
        return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
    }
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
        RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
        Duration timeout, Context context) {
        // Fold the loose parameters into a BlobCopyFromUrlOptions bag and delegate.
        return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
            .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
            .setDestinationRequestConditions(destRequestConditions), timeout, context);
    }
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void download(OutputStream stream) {
        // Kept for backward compatibility; forwards to the preferred downloadStream name.
        downloadStream(stream);
    }
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void downloadStream(OutputStream stream) {
        // Full-blob download with default range/retry/conditions; response is discarded.
        downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
    }
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BinaryData downloadContent() {
        // Block without a timeout on the async in-memory download.
        return blockWithOptionalTimeout(client.downloadContent(), null);
    }
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        // Kept for backward compatibility; forwards to the preferred *Stream* name.
        return downloadStreamWithResponse(stream, range,
            options, requestConditions, getRangeContentMd5, timeout, context);
    }
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("stream", stream);
        // Reduce the async body (a stream of ByteBuffers) into the caller's OutputStream, then
        // surface the original response once every buffer has been written.
        Mono<BlobDownloadResponse> download = client
            .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as unchecked so the reactive pipeline can carry the failure downstream.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadContentResponse downloadContentWithResponse(
        DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
        // Collect the entire body into BinaryData (in-memory; the 2 GB limit is noted in the
        // javadoc above), repackaging the streaming response as its content-typed counterpart.
        Mono<BlobDownloadContentResponse> download = client
            .downloadStreamWithResponse(null, options, requestConditions, false, context)
            .flatMap(r ->
                BinaryData.fromFlux(r.getValue())
                    .map(data ->
                        new BlobDownloadContentAsyncResponse(
                            r.getRequest(), r.getStatusCode(),
                            r.getHeaders(), data,
                            r.getDeserializedHeaders())
                    ))
            .map(BlobDownloadContentResponse::new);
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties downloadToFile(String filePath) {
        // Overwrite disabled: fails if the destination file already exists.
        return downloadToFile(filePath, false);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        // Delegate with null openOptions, i.e. the default create-new-only file behavior.
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
        Duration timeout, Context context) {
        // Normalize transfer options (apply defaults, wrap into the common type) before delegating
        // to the options-bag overload.
        final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
            ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
        return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
            .setParallelTransferOptions(finalParallelTransferOptions)
            .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
            .setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async file download and block for its completion, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Convenience overload: no snapshot-delete option, no request conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Invoke the async delete and block for the result, applying the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: default request conditions, no timeout, empty context.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions, no timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions, no timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout, empty context.
    Response<Map<String, String>> response = this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Retrieve the tags via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Convenience overload: wrap the tags in options, no timeout, empty context.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, no request conditions, no timeout.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Take the snapshot asynchronously, then wrap the async snapshot client in a synchronous
    // BlobClientBase inside the reactive chain before blocking with the optional timeout.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue()))),
        timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority, no lease, no timeout.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Fold the individual parameters into a BlobSetAccessTierOptions and delegate.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Set the tier via the async client, then block with the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload: no timeout, empty context.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout, empty context.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Query account info via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local (signing); delegate directly to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local (signing); delegate directly to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local (signing with the shared key); delegate to the async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local (signing with the shared key); delegate to the async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: wrap the expression in default query options.
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Map the async query response to a synchronous InputStream-bearing response inside the
    // reactive chain, so .block() is called exactly once at the end of the pipeline.
    Response<InputStream> response = client.queryWithResponse(queryOptions)
        .map(asyncResponse -> (Response<InputStream>) new ResponseBase<>(asyncResponse.getRequest(),
            asyncResponse.getStatusCode(), asyncResponse.getHeaders(),
            new FluxInputStream(asyncResponse.getValue()), asyncResponse.getDeserializedHeaders()))
        .block();
    // Mono.block() is @Nullable (empty Mono); guard so callers never receive null.
    if (response == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return response;
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Convenience overload: bundle the expression and destination stream into query options.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options object and its output stream are required up front.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // Drain the response body into the caller's OutputStream; 'reduce' keeps the writes
        // sequential over the ByteBuffer stream, then the response metadata is returned.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Surface I/O failures as unchecked so they propagate through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions, no timeout, empty context.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the policy via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Convenience overload: no timeout, empty context. The response is Response<Void>, so there
    // is no payload to extract — the dead '.getValue()' call is dropped.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Remove the policy via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Convenience overload: no timeout, empty context.
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout,
    Context context) {
    // Toggle the legal hold via the async client and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // All synchronous operations delegate to this async client and block on the result.
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    String accountUrl = client.getAccountUrl();
    return accountUrl;
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    String accountName = client.getAccountName();
    return accountName;
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    String containerName = client.getContainerName();
    return containerName;
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Reuse the async client's pre-configured container builder to keep pipeline/settings consistent.
    BlobContainerClientBuilder containerBuilder = client.getContainerClientBuilder();
    return containerBuilder.buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    String blobName = client.getBlobName();
    return blobName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    CpkInfo cpkInfo = client.getCustomerProvidedKey();
    return cpkInfo;
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    BlobServiceVersion serviceVersion = client.getServiceVersion();
    return serviceVersion;
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    String versionId = client.getVersionId();
    return versionId;
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
 * Opens a blob input stream to download the blob using default options (entire blob, no request
 * conditions).
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // Equivalent to openInputStream(null, null): default options select the whole blob with no conditions.
    return openInputStream(new BlobInputStreamOptions());
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    Mono<Response<Boolean>> existsOperation = client.existsWithResponse(context);
    return blockWithOptionalTimeout(existsOperation, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters (metadata, tier, priority, conditions) are left unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the generic source conditions into blob-specific ones and bundle everything into options.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the polling logic; expose its synchronous poller view.
    PollerFlux<BlobCopyInfo, Void> pollerFlux = client.beginCopy(options);
    return pollerFlux.getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // The response carries no payload; the call is made purely for its side effect.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abortOperation = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abortOperation, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    Mono<Response<String>> copyOperation = client.copyFromUrlWithResponse(options, context);
    return blockWithOptionalTimeout(copyOperation, timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future. Use {@code downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Same behavior as downloadStream(stream): whole blob, no retry options, no conditions.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full-blob download with default range/options/conditions; the Response is intentionally discarded.
    downloadStreamWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use streaming downloads for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> downloadOperation = client.downloadContent();
    // No timeout is applied for this convenience overload.
    return blockWithOptionalTimeout(downloadOperation, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future.
 * Use {@code downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
 * BlobRequestConditions, boolean, Duration, Context)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept only for backward compatibility; all work happens in downloadStreamWithResponse.
    return downloadStreamWithResponse(
        stream, range, options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Drain the async body into the caller's stream, then surface the original response metadata.
    Mono<BlobDownloadResponse> downloadOperation = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(asyncResponse -> asyncResponse.getValue()
            .reduce(stream, (os, byteBuffer) -> {
                try {
                    os.write(FluxUtil.byteBufferToArray(byteBuffer));
                    return os;
                } catch (IOException ioe) {
                    // Propagate as unchecked so the reactive pipeline terminates with the I/O failure.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ioe)));
                }
            })
            .thenReturn(new BlobDownloadResponse(asyncResponse)));
    return blockWithOptionalTimeout(downloadOperation, timeout);
}
/**
 * Downloads a range of bytes from a blob and returns it as {@link BinaryData} along with the full response.
 * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
 * {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@code downloadStreamWithResponse(OutputStream, BlobRange,
 * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the streaming body into BinaryData, then rewrap the response for the sync surface.
    Mono<BlobDownloadContentResponse> downloadOperation = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(streamResponse -> BinaryData.fromFlux(streamResponse.getValue())
            .map(body -> new BlobDownloadContentAsyncResponse(
                streamResponse.getRequest(), streamResponse.getStatusCode(),
                streamResponse.getHeaders(), body,
                streamResponse.getDeserializedHeaders()))
            .map(BlobDownloadContentResponse::new));
    return blockWithOptionalTimeout(downloadOperation, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    return downloadToFile(filePath, /* overwrite */ false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    if (!overwrite) {
        // Null open options keep the default create-new semantics (fails if the file exists).
        return downloadToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // Overwrite: truncate any existing file and allow read/write access.
    Set<OpenOption> overwriteOptions = new HashSet<>();
    overwriteOptions.add(StandardOpenOption.CREATE);
    overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    overwriteOptions.add(StandardOpenOption.READ);
    overwriteOptions.add(StandardOpenOption.WRITE);
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null,
        Context.NONE).getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate to the OpenOption-aware overload with default (null) open options.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common type, filling in defaults.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions fileOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(fileOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    Mono<Response<BlobProperties>> downloadOperation = client.downloadToFileWithResponse(options, context);
    return blockWithOptionalTimeout(downloadOperation, timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@code deleteWithResponse} and set {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Defaults: no snapshot option, no request conditions, no timeout; response is discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
// Convenience overload: no request conditions, no timeout; unwraps the Response payload.
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
// Delegates with defaults (no request conditions, no timeout); response headers discarded.
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
// Delegates with defaults (no request conditions, no timeout); response headers discarded.
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
// Convenience overload with default options and no timeout; unwraps the Response payload.
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
// Delegates with no timeout; response headers discarded.
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
// Convenience overload: no metadata, no request conditions, no timeout; unwraps the payload.
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, wrap the returned async snapshot client in a sync
    // client, then await the result with the optional timeout.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse -> new SimpleResponse<>(asyncResponse,
                new BlobClientBase(asyncResponse.getValue()))),
        timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
// Delegates with defaults: no rehydrate priority, no lease, no timeout; response discarded.
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
// Adapter overload: packs the loose parameters into BlobSetAccessTierOptions and forwards.
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Await the async tier change, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
// Delegates with no timeout; response headers discarded.
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
// Convenience overload with no timeout; unwraps the Response payload.
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
// Pure delegation: SAS generation is local signing work, so no service call or timeout applies.
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
// Pure delegation; accountName and context are forwarded for SAS generation only.
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
// Pure delegation: signs locally with the shared key credential; no service call is made.
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
// Pure delegation; context is only threaded through the local SAS-generation code.
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
// Convenience overload: wraps the expression in default query options and unwraps the payload.
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Build the sync Response inside the reactive chain and defer the terminal block() to the
    // return statement. This avoids holding a @Nullable block() result in a local (which
    // SpotBugs flags as a potential null dereference) and removes the manual null check; an
    // empty upstream is still surfaced as the original IllegalStateException.
    return client.queryWithResponse(queryOptions)
        .map(response -> new ResponseBase<>(response.getRequest(), response.getStatusCode(),
            response.getHeaders(), (InputStream) new FluxInputStream(response.getValue()),
            response.getDeserializedHeaders()))
        .switchIfEmpty(Mono.error(() -> logger.logExceptionAsError(
            new IllegalStateException("Query response cannot be null"))))
        .block();
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
// Convenience overload: default query options, no timeout; response headers discarded.
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
// Fail fast on missing options or output stream before issuing the request.
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
// Stream the query result buffers into the caller's OutputStream as they arrive; reduce is
// used only to sequence the writes, the accumulator is always the same stream instance.
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Propagate through the reactive chain as an unchecked exception (documented behavior).
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
// Convenience overload: no request conditions, no timeout; unwraps the Response payload.
return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Delegates with no timeout. The previous trailing .getValue() on the discarded
    // Response<Void> was dead code (a Response<Void> carries no payload) and has been removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
// Convenience overload with no timeout; unwraps the Response payload.
return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
Changed | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | com.azure.storage.common.ParallelTransferOptions pOptions = | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (applying defaults), then bundle every argument into the
    // options type accepted by the canonical overload.
    final com.azure.storage.common.ParallelTransferOptions wrappedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(wrappedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block for its completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Convenience overload: default snapshot-delete behavior, no request conditions,
    // no timeout, empty context; the response is intentionally discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: no request conditions or timeout; unwraps the response payload.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions or timeout; response is discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions or timeout; response is discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Delegate to the response-returning overload with default options, then unwrap the payload.
    Response<Map<String, String>> tagsResponse =
        getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return tagsResponse.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Convenience overload: no timeout; response is discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, request conditions, or timeout; unwraps the snapshot client.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, wrap the returned async client in a sync client,
    // then block for the result (bounded by the optional timeout).
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority, lease, or timeout; response is discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Package the individual arguments into the options type and delegate to the canonical overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Kick off the async tier change, then block (bounded by the optional timeout).
    Mono<Response<Void>> tierMono = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(tierMono, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload: no timeout; response is discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout; unwraps the response payload.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local (signing, no network call); delegate to the async client's
    // shared implementation.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local (signing, no network call); delegate to the async client's
    // shared implementation.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local (signing with the shared key credential); no service call.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local (signing with the shared key credential); no service call.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: wraps the expression in default query options and unwraps the stream.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Mono.block() is annotated nullable, so guard before dereferencing the response.
    BlobQueryAsyncResponse queryResponse = client.queryWithResponse(queryOptions).block();
    if (queryResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Wrap the reactive body in a blocking InputStream for the caller to consume lazily.
    InputStream resultStream = new FluxInputStream(queryResponse.getValue());
    return new ResponseBase<>(queryResponse.getRequest(), queryResponse.getStatusCode(),
        queryResponse.getHeaders(), resultStream, queryResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Convenience overload: default query options, no timeout; response is discarded.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options object and its output stream are required; fail fast on null.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the reactive body into the caller-supplied OutputStream, writing each buffer as it
    // arrives. IOExceptions are wrapped as UncheckedIOException and propagated through the
    // pipeline so they surface from the blocking call below.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions or timeout; unwraps the response payload.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Consistent with the other void convenience methods (delete(), undelete()): the
    // Response<Void> is intentionally discarded. The previous trailing .getValue() on a
    // Response<Void> was a no-op and has been removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Convenience overload: no timeout; unwraps the response payload.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Logger used to record and rethrow exceptions raised by this client.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous operation delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // All synchronous operations delegate to this async client.
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Get the url of the storage account backing this blob.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return this.client.getAccountUrl();
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return this.client.getBlobUrl();
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.client.getAccountName();
}
/**
 * Get the name of the container this blob belongs to.
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return this.client.getContainerName();
}
/**
 * Gets a client pointing to the parent container of this blob.
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Build a new sync container client from the async client's builder each call.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return this.client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return this.client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    // Package-private on purpose; mirrors the async client's accessor.
    return this.client.getEncryptionScope();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return this.client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return this.client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return this.client.getVersionId();
}
/**
 * Determines if this client targets a snapshot of a blob.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the entire blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // No range and no request conditions: stream the whole blob with defaults.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets whether the blob this client represents exists in the cloud.
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets whether the blob this client represents exists in the cloud.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional parameters left unset; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options-bag overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Adapt the async poller flow to a synchronous poller.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, timeout, or custom context.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Fold the flat parameter list into the options-bag overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    Mono<Response<String>> copy = client.copyFromUrlWithResponse(options, context);
    return blockWithOptionalTimeout(copy, timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method will be deprecated in the future; prefer the stream-based download methods.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backwards compatibility; forwards to the preferred name.
    downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options, no conditions, no MD5 check, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method supports downloads up to 2GB of data. Use the stream-based download methods for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // No timeout: block until the download completes.
    return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method will be deprecated in the future; prefer
 * {@code downloadStreamWithResponse}.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backwards compatibility; forwards all arguments unchanged.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's OutputStream, writing each buffer as it arrives,
    // then surface the HTTP response once the body has been fully consumed.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Propagate through the reactive chain as an unchecked exception so the subscriber sees it.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into memory. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use the stream-based download methods to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the streamed body into a BinaryData, preserving the original response metadata,
    // then wrap the async response in its synchronous counterpart.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: never overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Permit replacing an existing file: create if absent, truncate, then open for read/write.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    Response<BlobProperties> response =
        downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE);
    return response.getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward with default (null) open options — file must not already exist.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize user-facing transfer options into the common-layer representation with defaults applied.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob along with its snapshots use the with-response
 * overload and set {@code DeleteSnapshotsOptionType} to INCLUDE.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // No snapshot option, conditions, or timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot.
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<Response<Void>> delete =
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
    return blockWithOptionalTimeout(delete, timeout);
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns the blob's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
 * order to preserve existing values, they must be passed alongside the header being changed.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
 *
 * @param headers {@link BlobHttpHeaders}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns the blob's tags.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
 * must be preserved, they must be downloaded and included in the call to this method.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @return A {@link BlobClientBase} which is used to interact with the created snapshot; use
 * {@link BlobClientBase#getSnapshotId()} to get the identifier of the snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of the blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
 *
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async response's value in a sync snapshot client before blocking.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue()))),
        timeout);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Funnel into the options-based overload so both paths share one implementation.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
 * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
 * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
 * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
 * etag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    this.undeleteWithResponse(null, Context.NONE);
}
/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The sku name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. See
 * {@code BlobServiceClient.getUserDelegationKey} for how to get a user delegation key.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local; no service call is made here.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. See
 * {@code BlobServiceClient.getUserDelegationKey} for how to get a user delegation key.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local; no service call is made here.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is purely local; no service call is made here.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note: The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas}
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is purely local; no service call is made here.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param expression The query expression.
 * @return An {@code InputStream} object that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    return this.openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens a blob input stream to query the blob.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an {@code InputStream} object
 * that represents the stream to use for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Build the final Response inside the reactive chain and block exactly once at the end.
    // Mono.block() is annotated @Nullable, so holding its result in a local forces a null
    // check (flagged by SpotBugs); deferring the block avoids the nullable intermediate.
    // switchIfEmpty preserves the original behavior of throwing when no response is emitted.
    return client.queryWithResponse(queryOptions)
        .switchIfEmpty(Mono.error(() ->
            logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"))))
        .map(response -> (Response<InputStream>) new ResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            (InputStream) new FluxInputStream(response.getValue()), response.getDeserializedHeaders()))
        .block();
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    this.queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire blob into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the async body into the caller's OutputStream, writing each buffer as it
    // arrives, then surface the original response metadata as a BlobQueryResponse.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive chain.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy}
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    return this.setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse}
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response value is Void; the previous trailing .getValue() call was a no-op.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Delete the immutability policy on a blob, blob snapshot or blob version.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold}
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    return this.setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse}
 *
 * @param legalHold Whether or not you want a legal hold on the blob.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Delegate to the async client and block with the caller-supplied timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
See previous comment | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
// Logger used to map and surface exceptions on the synchronous API surface.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Every synchronous operation in this class delegates to this async client and blocks.
private final BlobAsyncClientBase client;

/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client to which all synchronous calls are delegated
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 * This client is not modified; a new synchronous facade is created over a snapshot-scoped async client.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    return new BlobClientBase(client.getSnapshotClient(snapshot));
}

/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 * This client is not modified; a new synchronous facade is created over a version-scoped async client.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    return new BlobClientBase(client.getVersionClient(versionId));
}

/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 * This client is not modified; a new synchronous facade is created over a scoped async client.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}

/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 * This client is not modified; a new synchronous facade is created over a scoped async client.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
 * Get the url of the storage account. Delegates to the wrapped async client.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return client.getAccountUrl();
}

/**
 * Gets the URL of the blob represented by this client. Delegates to the wrapped async client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    return client.getBlobUrl();
}

/**
 * Get associated account name. Delegates to the wrapped async client.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return client.getAccountName();
}

/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    return client.getContainerName();
}

/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    return client.getContainerClientBuilder().buildClient();
}

/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    return client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return client.getHttpPipeline();
}

/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return client.getCustomerProvidedKey();
}

/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 * Package-private: only used within the library.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    return client.getEncryptionScope();
}

/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return client.getServiceVersion();
}

/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    return client.getSnapshotId();
}

/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    return client.getVersionId();
}

/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    return client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob. Downloads the whole blob from offset 0.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    return openInputStream(null, null);
}

/**
 * Opens a blob input stream to download the specified range of the blob.
 * Convenience overload that forwards to the options-based overload.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/*
 * NOTE(review): orphaned Javadoc — it describes openInputStream(BlobInputStreamOptions), which is
 * defined earlier in this file. Demoted to a plain block comment so it does not attach to the
 * following, unrelated declaration.
 *
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    return existsWithResponse(null, Context.NONE).getValue();
}

/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    Mono<Response<Boolean>> response = client.existsWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a>.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional parameters default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl,
        null,
        null,
        null,
        null,
        null, pollInterval);
}

/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a>.</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
        .setRehydratePriority(priority).setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}

/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a>.</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a>.</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}

/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a>.</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a>.</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a>.</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Adapt the flat parameter list onto the options-bag overload.
    return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
        .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions), timeout, context);
}

/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a>.</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    Mono<Response<String>> response = client
        .copyFromUrlWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    downloadStream(stream);
}

/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}

/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStream(OutputStream)} to download larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // No timeout is applied here; blockWithOptionalTimeout treats null as "block indefinitely".
    return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * <p>This method will be deprecated in the future.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
 * BlobRequestConditions, boolean, Duration, Context)} instead.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    return downloadStreamWithResponse(stream, range,
        options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Drain the response body into the caller's stream. reduce keeps the writes sequential; any
    // IOException is wrapped as UncheckedIOException and propagated through the reactive chain.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange,
 * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the whole body into BinaryData, then re-wrap the async response as a sync one.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // Without overwrite, pass null open options so the default create-new semantics apply
    // (an existing file triggers FileAlreadyExistsException downstream).
    if (!overwrite) {
        return downloadToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // With overwrite, open the file for read/write, creating it if absent and truncating it otherwise.
    Set<OpenOption> overwriteOptions = new HashSet<>();
    overwriteOptions.add(StandardOpenOption.CREATE);
    overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    overwriteOptions.add(StandardOpenOption.READ);
    overwriteOptions.add(StandardOpenOption.WRITE);
    return downloadToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forwards with null openOptions, preserving the default create-new file semantics.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a>.</p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (apply defaults) before delegating to the options-bag overload.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate to the full overload with default (null) snapshot handling,
    // no request conditions and no timeout.
    this.deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block for completion, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Fetch with default conditions and unwrap the value from the full response.
    return this.getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Start the async properties fetch and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegate with default (null) conditions and no timeout; response is discarded.
    this.setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async header update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegate with default (null) conditions and no timeout; response is discarded.
    this.setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Issue the async metadata replacement and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Fetch with default options and unwrap the tag map from the full response.
    return getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Start the async tag fetch and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Replace all existing tags; response details are not needed here.
    setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Issue the async tag replacement and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Snapshot with no metadata or conditions; unwrap the snapshot client from the response.
    return this.createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, then wrap the returned async client in a
    // synchronous BlobClientBase before blocking for the result.
    Mono<Response<BlobClientBase>> wrapped = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(wrapped, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegate with no rehydrate priority, no lease, no timeout; response is discarded.
    this.setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Fold the individual parameters into the options bag and delegate.
    BlobSetAccessTierOptions options = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(options, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Issue the async tier change and block, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegate with no timeout; response is discarded.
    this.undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Issue the async undelete and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Fetch with no timeout and unwrap the value from the full response.
    return this.getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Start the async account-info fetch and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is a purely local signing operation; no service call is made.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is a purely local signing operation; no service call is made.
    return client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Delegates to the async client; signing happens locally with the shared key credential.
    return client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Delegates to the async client; signing happens locally with the shared key credential.
    return client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Build default query options around the expression and unwrap the stream.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Map the async response to the sync Response inside the reactive pipeline and
    // defer block() to the very end. This removes the nullable intermediate variable
    // (Mono.block() is annotated @Nullable) that SpotBugs would otherwise flag, so no
    // explicit null check is required.
    return client.queryWithResponse(queryOptions)
        .<Response<InputStream>>map(response -> new ResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            new FluxInputStream(response.getValue()), response.getDeserializedHeaders()))
        .block();
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Delegate with default query options, no timeout, and an empty context.
    BlobQueryOptions queryOptions = new BlobQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    // Both the options bag and its output stream are mandatory for this overload.
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the response body Flux into the caller's OutputStream, writing each
    // buffer as it arrives, then surface the response metadata once the body
    // is fully drained.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can cross the reactive boundary,
                // logging it before propagation.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Delegate with default conditions and no timeout; unwrap the applied policy.
    return this.setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async policy update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response carries no value (Response<Void>), so the previous trailing
    // getValue() call was a discarded no-op and has been removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Issue the async policy deletion and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Delegate with no timeout and unwrap the legal hold result from the response.
    return this.setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Issue the async legal-hold update and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Logger scoped to this class; used when surfacing mapped exceptions.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous operation delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Build a sibling client that applies the supplied encryption scope.
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Build a sibling client that uses the supplied customer-provided key.
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Delegates to the wrapped async client.
    final String accountUrl = client.getAccountUrl();
    return accountUrl;
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Delegates to the wrapped async client.
    final String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Delegates to the wrapped async client.
    final String accountName = client.getAccountName();
    return accountName;
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Delegates to the wrapped async client.
    final String containerName = client.getContainerName();
    return containerName;
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // The async client exposes a builder pre-configured with this blob's container settings;
    // use it to construct the synchronous container client.
    return client.getContainerClientBuilder()
        .buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Delegates to the wrapped async client, which performs the decoding.
    final String blobName = client.getBlobName();
    return blobName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Delegates to the wrapped async client.
    final HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Delegates to the wrapped async client.
    final CpkInfo cpkInfo = client.getCustomerProvidedKey();
    return cpkInfo;
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private accessor; delegates to the wrapped async client.
    final String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Delegates to the wrapped async client.
    final BlobServiceVersion serviceVersion = client.getServiceVersion();
    return serviceVersion;
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Delegates to the wrapped async client.
    final String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Delegates to the wrapped async client.
    final String versionId = client.getVersionId();
    return versionId;
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Delegates to the wrapped async client.
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // No range or access conditions: defer to the options overload with defaults, which is
    // equivalent to passing (null, null) through the legacy overload.
    return openInputStream(new BlobInputStreamOptions());
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Translate the legacy parameters into the options-bag overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // No timeout; unwrap the value from the full response.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Defer to the fully-parameterized overload with every optional argument unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Gather the legacy parameters into the options bag, converting the generic source
    // conditions into blob-specific ones, then defer to the options overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // The async client owns the long-running copy polling; expose its synchronous poller.
    return this.client.beginCopy(options)
        .getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // Abort with no lease id, timeout, or extra context; the response is discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async abort, bounding the wait when a timeout is supplied.
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Synchronous one-shot copy with every optional parameter defaulted; return the copy id.
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Gather the legacy parameters into the options bag and defer to the options overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async one-shot copy, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backward compatibility; identical to downloadStream.
    downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob, no retry options, conditions, MD5 check, or timeout; response is discarded.
    downloadStreamWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Block with no timeout on the async content download.
    Mono<BinaryData> download = client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backward compatibility; forwards verbatim to downloadStreamWithResponse.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's OutputStream, then surface the original
    // response metadata once all bytes have been written.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Rethrow as unchecked so the reactive pipeline terminates with the I/O failure.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Buffer the whole body flux into BinaryData (hence the documented 2GB limit), then
    // re-wrap the streaming response as a content response before blocking.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default is overwrite == false: fails with FileAlreadyExistsException if the file exists.
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // A null option set keeps the default create-new-file behavior; when overwriting, open
    // the destination so an existing file is truncated instead of rejected.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward with null OpenOptions, i.e. the default create-new-file behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options to the common type, then gather everything
    // into the options bag and defer to the options overload.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, bounding the wait when a timeout is supplied.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete with no snapshot option, conditions, or timeout; the response is discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes this blob or snapshot and returns the raw HTTP response. To delete a blob together with its
 * snapshots, pass {@code DeleteSnapshotsOptionType.INCLUDE}.
 *
 * @param deleteBlobSnapshotOptions How snapshots are handled: {@code Include} deletes the base blob and all
 * snapshots, {@code Only} deletes only the snapshots; pass null when deleting a snapshot itself.
 * @param requestConditions {@link BlobRequestConditions} to satisfy before deleting.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
 * Returns this blob's metadata and properties.
 *
 * <p>For more information, see the Azure Docs for the Get Blob Properties REST operation.</p>
 *
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Returns this blob's metadata and properties together with the raw HTTP response.
 *
 * @param requestConditions {@link BlobRequestConditions} to satisfy before reading.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The blob properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
 * Changes this blob's HTTP header properties. If only one HTTP header is updated, the others are all erased;
 * to preserve existing values they must be passed alongside the header being changed.
 *
 * @param headers {@link BlobHttpHeaders} to set on the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes this blob's HTTP header properties and returns the raw HTTP response. If only one HTTP header is
 * updated, the others are all erased; to preserve existing values they must be passed alongside the header
 * being changed.
 *
 * @param headers {@link BlobHttpHeaders} to set on the blob.
 * @param requestConditions {@link BlobRequestConditions} to satisfy before updating.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
 * Changes this blob's metadata. The specified metadata replaces existing metadata entirely; to preserve old
 * values they must be downloaded first and included in this call.
 *
 * @param metadata Metadata to associate with the blob. Leading or trailing whitespace in any metadata key or
 * value must be removed or encoded.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
 * Changes this blob's metadata and returns the raw HTTP response. The specified metadata replaces existing
 * metadata entirely; to preserve old values they must be downloaded first and included in this call.
 *
 * @param metadata Metadata to associate with the blob. Leading or trailing whitespace in any metadata key or
 * value must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions} to satisfy before updating.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
 * Returns this blob's user-defined tags.
 *
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
 * Returns this blob's user-defined tags together with the raw HTTP response.
 *
 * @param options {@link BlobGetTagsOptions}
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The blob's tags.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
 * Sets user-defined tags on this blob. The specified tags replace existing tags entirely; to preserve old
 * values they must be downloaded first and included in this call.
 *
 * @param tags Tags to associate with the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
 * Sets user-defined tags on this blob and returns the raw HTTP response. The specified tags replace existing
 * tags entirely; to preserve old values they must be downloaded first and included in this call.
 *
 * @param options {@link BlobSetTagsOptions}
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
 * Creates a read-only snapshot of this blob.
 *
 * @return A {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
 * Creates a read-only snapshot of this blob and returns the raw HTTP response.
 *
 * @param metadata Metadata to associate with the snapshot. Leading or trailing whitespace in any metadata
 * key or value must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions} to satisfy before snapshotting.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing a {@link BlobClientBase} used to interact with the created snapshot.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before blocking.
    Mono<Response<BlobClientBase>> snapshotMono = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotMono, timeout);
}
/**
 * Sets the tier on this blob. The operation is allowed on a page blob in a premium storage account or a block
 * blob in a blob storage or GPv2 account. A premium page blob's tier determines the allowed size, IOPS, and
 * bandwidth; a block blob's tier determines the Hot/Cool/Archive storage type. This does not update the
 * blob's etag.
 *
 * @param tier The new tier for the blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
 * Sets the tier on this blob and returns the raw HTTP response. See
 * {@link #setAccessTierWithResponse(BlobSetAccessTierOptions, Duration, Context)} for details; this overload
 * simply bundles the individual arguments into a {@link BlobSetAccessTierOptions}.
 *
 * @param tier The new tier for the blob.
 * @param priority Optional priority to set for re-hydrating blobs.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
        timeout, context);
}
/**
 * Sets the tier on this blob and returns the raw HTTP response. The operation is allowed on a page blob in a
 * premium storage account or a block blob in a blob storage or GPv2 account. This does not update the blob's
 * etag.
 *
 * @param options {@link BlobSetAccessTierOptions}
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    undeleteWithResponse(null, Context.NONE);
}
/**
 * Restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots,
 * returning the raw HTTP response.
 *
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
 * Returns the SKU name and account kind for the storage account holding this blob.
 *
 * @return The SKU name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
 * Returns the SKU name and account kind for the storage account holding this blob, together with the raw
 * HTTP response.
 *
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The SKU name and account kind.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * See {@link BlobServiceSasSignatureValues} for how to construct a user delegation SAS.
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
 * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * See {@link BlobServiceSasSignatureValues} for how to construct a user delegation SAS.
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context passed through when generating the SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * Note: the client must be authenticated via {@link StorageSharedKeyCredential}.
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
 * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
 * Note: the client must be authenticated via {@link StorageSharedKeyCredential}.
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context passed through when generating the SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
 * Opens an input stream over the results of a query against this blob.
 *
 * @param expression The query expression.
 * @return An {@code InputStream} for reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
 * Opens an input stream over the results of a query against this blob, returning the raw HTTP response.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}.
 * @return A response containing status code and HTTP headers, whose value is an {@code InputStream} for
 * reading the query response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Defer block() to the end of the reactive chain so no local variable is assigned from the
    // @Nullable block() result; this removes the manual null check previously needed to satisfy
    // static analysis (SpotBugs NP warnings).
    return client.queryWithResponse(queryOptions)
        .map(response -> (Response<InputStream>) new ResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            new FluxInputStream(response.getValue()), response.getDeserializedHeaders()))
        .block();
}
/**
 * Queries this blob and writes the entire result to the given output stream.
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries this blob and writes the entire result to the output stream carried in the options, returning the
 * raw HTTP response.
 *
 * @param queryOptions {@link BlobQueryOptions The query options}; must carry a non-null output stream.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs while writing to the stream.
 * @throws NullPointerException if the options or their output stream are null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Reduce the response body into the caller's output stream, then surface the mapped response.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Propagate as unchecked so the reactive pipeline can carry it to block().
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Sets the immutability policy on a blob, blob snapshot, or blob version.
 * <p>NOTE: Blob Versioning must be enabled on the storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @return The immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
 * Sets the immutability policy on a blob, blob snapshot, or blob version, returning the raw HTTP response.
 * <p>NOTE: Blob Versioning must be enabled on the storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
 * @param requestConditions {@link BlobRequestConditions} to satisfy before updating.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing the immutability policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot, or blob version.
 * <p>NOTE: Blob Versioning must be enabled on the storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The previous implementation called getValue() on the Response<Void> and discarded the
    // (always-null) result; the call was a no-op and has been removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
 * Deletes the immutability policy on a blob, blob snapshot, or blob version, returning the raw HTTP response.
 * <p>NOTE: Blob Versioning must be enabled on the storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
 * Sets a legal hold on the blob.
 * <p>NOTE: Blob Versioning must be enabled on the storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not to place a legal hold on the blob.
 * @return The legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
 * Sets a legal hold on the blob, returning the raw HTTP response.
 * <p>NOTE: Blob Versioning must be enabled on the storage account and the blob must be in a container with
 * immutable storage with versioning enabled to call this API.</p>
 *
 * @param legalHold Whether or not to place a legal hold on the blob.
 * @param timeout Optional timeout beyond which a {@link RuntimeException} is raised.
 * @param context Additional context passed through the HTTP pipeline.
 * @return A response containing the legal hold result.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
Updated | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | } | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
// Logger used to surface exceptions thrown by this client.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; the synchronous operations in this class delegate to it.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client that all synchronous operations delegate to
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    final String accountUrl = client.getAccountUrl();
    return accountUrl;
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    final String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    final String accountName = client.getAccountName();
    return accountName;
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    final String containerName = client.getContainerName();
    return containerName;
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return client.getContainerClientBuilder()
        .buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    final String blobName = client.getBlobName();
    return blobName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    final HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    final CpkInfo customerProvidedKey = client.getCustomerProvidedKey();
    return customerProvidedKey;
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    final String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    final BlobServiceVersion serviceVersion = client.getServiceVersion();
    return serviceVersion;
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    final String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    final String versionId = client.getVersionId();
    return versionId;
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // No range and no access conditions: stream the entire blob with defaults.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Package the legacy parameters into the options bag and delegate.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the response-returning overload with no timeout and an empty context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Forward to the full overload with every optional argument unset.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the legacy parameter list into the options bag and delegate.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async poller and expose its synchronous wrapper to the caller.
    return client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // Response is intentionally discarded; delegate with no lease, timeout, or context.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the legacy parameter list into the options bag and delegate.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async call, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>This method will be deprecated in the future. Use {@code downloadStream(OutputStream)} instead.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept only for backward compatibility; delegates directly to downloadStream.
    this.downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob (null range), default retry/conditions, no MD5, no timeout.
    this.downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>This method supports downloads up to 2GB of data. Use a streaming download for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> download = client.downloadContent();
    // No timeout is applied for this convenience overload.
    return blockWithOptionalTimeout(download, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>This method will be deprecated in the future. Use
 * {@code downloadStreamWithResponse} instead.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept only for backward compatibility; delegates directly to downloadStreamWithResponse.
    return this.downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5,
        timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        // Fold each downloaded ByteBuffer into the caller's stream as it arrives, then replace the
        // body with a sync BlobDownloadResponse wrapper once the flux completes.
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>This method supports downloads up to 2GB of data. Use the streaming overload
 * to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    Mono<BlobDownloadContentResponse> download = client
        // Always downloads the full blob (null range) without MD5 validation.
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        // Collect the streamed body into BinaryData while preserving the response metadata.
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: fail if the destination file already exists.
    return this.downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // CREATE + TRUNCATE_EXISTING replaces an existing file rather than failing on it.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        overwriteOptions, null, Context.NONE);
    return response.getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with default open options (create-new semantics).
    return this.downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the public transfer options into the common-package representation, applying defaults.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and block for its completion, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegates with no snapshot option, no request conditions and no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: no request conditions, no timeout; unwraps the response value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch the properties asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegates with no request conditions and no timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers through the async client and block for the service response.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegates with no request conditions and no timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata via the async client and block for the result.
    return blockWithOptionalTimeout(
        client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout; unwraps the response value.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Delegates with no timeout; replaces any existing tags.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Apply the tags through the async client and block for the service response.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, no request conditions, no timeout.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, wrap the returned async client in a sync
    // BlobClientBase, then block for the mapped response.
    return blockWithOptionalTimeout(
        client.createSnapshotWithResponse(metadata, requestConditions, context)
            .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue()))),
        timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegates with no rehydrate priority, no lease and no timeout.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into the options type used by the options-based overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Change the tier via the async client, then block for the service response.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegates with no timeout and the default context.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob via the async client and block for the result.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout; unwraps the response value.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Query the account info asynchronously and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegates SAS generation to the underlying async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Delegates SAS generation (with explicit account name and context) to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Delegates service-SAS generation to the underlying async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Delegates service-SAS generation (with context) to the underlying async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: wraps the expression in default query options and unwraps the response.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Map the async response into the final Response<InputStream> inside the reactive chain
    // and defer the single (nullable) .block() to the end. This leaves exactly one value that
    // can be null, so only one null check is needed to satisfy static nullability analysis.
    Response<InputStream> response = client.queryWithResponse(queryOptions)
        .<Response<InputStream>>map(r -> new ResponseBase<>(r.getRequest(), r.getStatusCode(), r.getHeaders(),
            new FluxInputStream(r.getValue()), r.getDeserializedHeaders()))
        .block();
    if (response == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    return response;
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Delegates with default query options built from the expression and output stream.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Stream the query result buffers into the caller-supplied OutputStream. The reduce keeps
    // writes sequential; IOExceptions are rethrown through the reactive pipeline as
    // UncheckedIOException via Exceptions.propagate.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions, no timeout; unwraps the response value.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the immutability policy via the async client and block for the result.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response payload is Void, so the original trailing .getValue() call was a no-op
    // whose result was discarded; the bare delegation is sufficient.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Remove the immutability policy via the async client and block for the result.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Convenience overload: no timeout; unwraps the response value.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Apply the legal hold via the async client and block for the service response.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Logger scoped to this class for error reporting via logExceptionAsError.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Underlying async client; every synchronous operation delegates to it and blocks on the result.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // All synchronous operations on this client delegate to the supplied async client.
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    final String accountUrl = client.getAccountUrl();
    return accountUrl;
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    final String blobUrl = client.getBlobUrl();
    return blobUrl;
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    final String accountName = client.getAccountName();
    return accountName;
}
/**
 * Get the container name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    final String containerName = client.getContainerName();
    return containerName;
}
/**
 * Gets a client pointing to the parent container.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Build a fresh sync container client from the async client's builder each call.
    BlobContainerClientBuilder containerBuilder = client.getContainerClientBuilder();
    return containerBuilder.buildClient();
}
/**
 * Decodes and gets the blob name.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    final String blobName = client.getBlobName();
    return blobName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    final HttpPipeline pipeline = client.getHttpPipeline();
    return pipeline;
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    final CpkInfo cpk = client.getCustomerProvidedKey();
    return cpk;
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    final String encryptionScope = client.getEncryptionScope();
    return encryptionScope;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    final BlobServiceVersion serviceVersion = client.getServiceVersion();
    return serviceVersion;
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob.
 */
public String getSnapshotId() {
    final String snapshotId = client.getSnapshotId();
    return snapshotId;
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob.
 */
public String getVersionId() {
    final String versionId = client.getVersionId();
    return versionId;
}
/**
 * Determines if a blob is a snapshot.
 *
 * @return A boolean that indicates if a blob is a snapshot.
 */
public boolean isSnapshot() {
    final boolean snapshot = client.isSnapshot();
    return snapshot;
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // No range and no access conditions: stream the entire blob with defaults.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // Delegate straight to the options-based overload instead of threading six nulls through the
    // intermediate overload. Unset options carry their defaults, so behavior is identical.
    return beginCopy(new BlobBeginCopyOptions(sourceUrl).setPollInterval(pollInterval));
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Fold the individual parameters into an options object and delegate.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the Azure Docs (Copy Blob REST API).</p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the async poller and expose its synchronous view.
    PollerFlux<BlobCopyInfo, Void> asyncPoller = client.beginCopy(options);
    return asyncPoller.getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST API).</p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // Delegate to the WithResponse variant with no lease, no timeout, and a default context.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs (Abort Copy Blob REST API).</p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST API).</p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    Response<String> response =
        copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST API).</p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Fold the individual parameters into an options object and delegate.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the Azure Docs (Copy Blob From URL REST API).</p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, honoring the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>This method will be deprecated in the future; it is a straight alias for
 * {@link #downloadStream(OutputStream)} and new code should call that method directly.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full-blob download: no range, no retry options, no conditions, no MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>This method supports downloads up to 2GB of data. Use a streaming download for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    Mono<BinaryData> content = client.downloadContent();
    // No timeout is applied for this convenience overload.
    return blockWithOptionalTimeout(content, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>This method will be deprecated in the future; it is a straight alias for
 * {@code downloadStreamWithResponse} and new code should call that method directly.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    return downloadStreamWithResponse(stream, range,
        options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's stream: each buffer is copied to the stream
    // as it arrives, so the whole blob is never held in memory at once. IOExceptions from the
    // stream are wrapped as UncheckedIOException and propagated through the reactive chain.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    // Only blocks once the entire body has been written to the stream.
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into memory. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>This method supports downloads up to 2GB of data. Use a streaming download to handle larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the whole body flux into a BinaryData (hence the 2GB limit), preserving the
    // request/status/headers from the streaming response, then wrap as the sync response type.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Never overwrite by default; an existing file triggers FileAlreadyExistsException downstream.
    final boolean overwrite = false;
    return downloadToFile(filePath, overwrite);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (!overwrite) {
        // null lets the downstream overload apply its default create-new semantics.
        openOptions = null;
    } else {
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Delegate with null openOptions, which keeps the default create-new file behavior.
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common representation first.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions fileOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(fileOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots set
 * {@code DeleteSnapshotsOptionType} to INCLUDE on the WithResponse overload.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * @throws BlobStorageException If a storage service error occurred.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Defaults: no snapshot option, no request conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
 *
 * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
 * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
 * deleted, you must pass null.
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async delete, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties getProperties() {
        // Convenience overload: no request conditions, no timeout.
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Convenience overload: no request conditions, no timeout.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setMetadata(Map<String, String> metadata) {
        // Convenience overload: no request conditions, no timeout.
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Map<String, String> getTags() {
        // Convenience overload with default options, no timeout.
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setTags(Map<String, String> tags) {
        // Convenience overload: wraps the tags in the options bag, no timeout.
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobClientBase createSnapshot() {
        // Convenience overload: no metadata, no request conditions, no timeout.
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setAccessTier(AccessTier tier) {
        // Convenience overload: no rehydrate priority, no lease, no timeout.
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void undelete() {
        // Convenience overload: no timeout.
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public StorageAccountInfo getAccountInfo() {
        // Convenience overload: no timeout.
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        // SAS generation is purely local (signing); no service call is made here.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        // SAS generation is purely local (signing); no service call is made here.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        // SAS generation is purely local (signing with the shared key); no service call is made.
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        // SAS generation is purely local (signing with the shared key); no service call is made.
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public InputStream openQueryInputStream(String expression) {
        // Convenience overload: wraps the expression in default query options.
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void query(OutputStream stream, String expression) {
        // Convenience overload: wraps the expression and target stream in query options, no timeout.
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Stream each ByteBuffer of the query result into the caller-supplied OutputStream, then
        // surface the HTTP response metadata once the body has been fully written.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as unchecked so the failure can propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
        // Convenience overload: no request conditions, no timeout.
        return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobLegalHoldResult setLegalHold(boolean legalHold) {
        // Convenience overload: no timeout.
        return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
    }
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
}
Done | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue(); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
    // Client-scoped logger; all exceptions are surfaced via logger.logExceptionAsError per SDK convention.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Underlying async client; every synchronous operation in this class delegates to it and blocks.
    private final BlobAsyncClientBase client;
    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client backing all synchronous operations of this client
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (apply defaults, adapt to the common type), then funnel
    // everything through the options-bag overload.
    com.azure.storage.common.ParallelTransferOptions normalizedTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(normalizedTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Kick off the async download and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegates with no snapshot-delete option, no request conditions, and no timeout;
    // the Response is intentionally discarded.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Issue the async delete and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Delegates with no request conditions and no timeout, unwrapping the response value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Delegates with no request conditions and no timeout; the Response is intentionally discarded.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Apply the headers asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Delegates with no request conditions and no timeout; the Response is intentionally discarded.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Delegates with default options and no timeout, unwrapping the response value.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Fetch the tags asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Delegates with no timeout; the Response is intentionally discarded.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Delegates with no metadata, no request conditions, and no timeout, unwrapping the value.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, wrapping the returned async client in a synchronous
    // BlobClientBase before blocking on the result.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Delegates with no rehydrate priority, no lease, and no timeout; the Response is discarded.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Bundle the individual parameters into an options bag and delegate.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Set the tier asynchronously, then block honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Delegates with no timeout; the Response is intentionally discarded.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Delegates with no timeout, unwrapping the response value.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Fetch account info asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is a pure local signing operation; delegate directly to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is a pure local signing operation; delegate directly to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // SAS generation is a pure local signing operation; delegate directly to the async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // SAS generation is a pure local signing operation; delegate directly to the async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Delegates with default query options, unwrapping the stream from the response.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Mono.block() is @Nullable, so guard explicitly before dereferencing the response.
    BlobQueryAsyncResponse asyncResponse = client.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the async body flux to a blocking InputStream for the synchronous caller.
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Delegates with no timeout; the Response is intentionally discarded.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Stream the query result body into the caller-supplied OutputStream: reduce over the
    // ByteBuffer flux, writing each buffer as it arrives, then surface the response metadata.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Wrap the checked IOException so it can propagate through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Delegates with no request conditions and no timeout, unwrapping the response value.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the immutability policy asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // A Response<Void> carries no body, so the previous .getValue() call was a no-op;
    // just issue the request and discard the response.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Delete the immutability policy asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Delegates with no timeout, unwrapping the response value.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout,
    Context context) {
    // Apply the legal hold asynchronously and block, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation on this class delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
 * Constructor used by {@link SpecializedBlobClientBuilder}.
 *
 * @param client the async blob client
 */
protected BlobClientBase(BlobAsyncClientBase client) {
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", queryOptions);
StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
Mono<BlobQueryResponse> download = client
.queryWithResponse(queryOptions, context)
.flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobQueryResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
}
I'd rather not take action on. I don't think patterns around InputStreams will cause that to result in memory pressure. 1) It's only 4Mb and 2) I expect people will be reading all or most of that data immediately after opening the stream (when we buffer it). It'd really only be an issue if people were opening but not reading lots of streams, which seems like anti pattern. Or like just not even a pattern | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions pOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(r, conditions) -> client.downloadWithResponse(r, null, conditions, false);
Tuple3<Long, BlobRequestConditions, BlobDownloadAsyncResponse> tuple =
ChunkedDownloadUtils.downloadFirstChunk(range, pOptions, requestConditions, downloadFunc, true).block();
if (tuple == null) {
throw logger.logExceptionAsError(new IllegalStateException("Downloading first chunk returned null"));
}
BlobDownloadAsyncResponse downloadResponse = tuple.getT3();
ByteBuffer initialBuffer = FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap).block();
if (initialBuffer == null) {
throw logger.logExceptionAsError(new IllegalStateException("Collecting first chunk returned null"));
}
BlobProperties properties = BlobAsyncClientBase.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
} | .map(ByteBuffer::wrap).block(); | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
// Normalize every optional input to its documented default.
options = options == null ? new BlobInputStreamOptions() : options;
// ETAG is the default consistency mode for streamed reads.
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
// A null range means "the entire blob, starting at offset 0".
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
// Default block size per service round trip is 4 MB.
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
// Downloader used for the first chunk; checksum validation is disabled (false).
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
// The whole pipeline is composed reactively and terminated by a single block()
// so no intermediate block() result needs a null check (avoids SpotBugs NP flags
// on the @Nullable return of Mono.block()).
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
// T3 of the tuple is the raw download response for the first chunk.
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
// Buffer the first chunk's bytes and carry the response alongside them.
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Lock subsequent reads to the ETag observed on the first chunk,
// unless the caller supplied an explicit if-match condition.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
// Errors surface through the Mono rather than being thrown synchronously.
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Pin reads to the current version unless already version-scoped.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // All synchronous operations on this class delegate to the wrapped async client.
    this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Obtain a snapshot-scoped async client and wrap it in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Obtain a version-scoped async client and wrap it in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Scope the async client to the given encryption scope, then wrap it synchronously.
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Scope the async client to the given customer-provided key, then wrap it synchronously.
    BlobAsyncClientBase cpkAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
    // Pure delegation to the wrapped async client.
    return this.client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return this.client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
    // Build a synchronous container client from the async client's container builder.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
    // Pure delegation to the wrapped async client; the returned name is URL-decoded there.
    return this.client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return this.client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
    // Package-private on purpose; pure delegation to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return this.client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return this.client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
    // Defaults: read the whole blob with no request conditions.
    return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Package the legacy parameter pair into the options-bag overload.
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the full-response overload and unwrap the boolean value.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, honoring the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional parameters default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options-bag overload.
    BlobBeginCopyOptions options = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(options);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Start the long-running copy on the async client and expose it as a synchronous poller.
    return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // Abort with default lease/timeout/context; the response is intentionally discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Kick off the async abort, then block, bounded by the caller-supplied timeout (if any).
    Mono<Response<Void>> response = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Delegate to the full-response overload with all optional parameters defaulted.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Collect the flat parameter list into the options bag before delegating.
    BlobCopyFromUrlOptions options = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(options, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Synchronously wait (bounded by timeout, when provided) on the async copy.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    // Kept for backward compatibility; downloadStream is the preferred entry point.
    downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob (null range), no retry options, no request conditions, no range MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Buffer the whole blob in memory via the async client, blocking without a timeout.
    Mono<BinaryData> downloadTask = client.downloadContent();
    return blockWithOptionalTimeout(downloadTask, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Retained for backward compatibility; downloadStreamWithResponse is the preferred entry point.
    return downloadStreamWithResponse(
        stream, range, options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Reduce the async body flux into the caller's OutputStream, then surface the
    // HTTP response only after every buffer has been written.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Rethrow as unchecked so the failure travels through the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the streamed body (null range => whole blob, no range MD5) into BinaryData,
    // re-wrapping the async response so headers/status are preserved alongside the content.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // overwrite=false: fails with FileAlreadyExistsException if the target file exists.
    return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (overwrite) {
        // Allow replacing an existing file: create if absent, truncate if present.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // null keeps the default behavior: FileAlreadyExistsException when the file exists.
        openOptions = null;
    }
    Response<BlobProperties> response =
        downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE);
    return response.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    // Forward to the overload that accepts OpenOptions, using the default (null) set.
    Set<OpenOption> openOptions = null;
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, openOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the transfer options (apply defaults, wrap into the common type) up front.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Collect everything into the options bag before delegating.
    BlobDownloadToFileOptions options = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(options, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Start the async download and synchronously wait for it, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Convenience overload: no snapshot option, no request conditions, no timeout.
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: no request conditions or timeout; unwraps the response value.
    return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Fetch properties via the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions or timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block for completion, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions or timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Replace the blob's metadata via the async client, blocking up to the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout; unwraps the response value.
    return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Retrieve the tags via the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Convenience overload: wraps the tags in the options bag; no timeout.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Replace the blob's tags via the async client, blocking up to the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, request conditions, or timeout; unwraps the snapshot client.
    return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Create the snapshot asynchronously, then wrap the returned async client in a sync
    // BlobClientBase before blocking for the response.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority, lease, or timeout.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Repackage the flat parameters into the options bag and delegate to the canonical overload.
    return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
        timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Issue the async Set Tier call, then block for it, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload: no timeout.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Restore the soft-deleted blob via the async client, blocking up to the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout; unwraps the response value.
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Query the account info via the async client and block, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // SAS generation is purely local (signing), so this delegates straight to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // SAS generation is purely local (signing), so this delegates straight to the async client.
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Service SAS signing happens locally with the shared key credential; no network call.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Service SAS signing happens locally with the shared key credential; no network call.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: wraps the expression in the options bag and unwraps the stream.
    return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    // Map the async query response to the sync Response<InputStream> inside the reactive
    // chain and defer block() to the return. block() is annotated @Nullable, so blocking
    // first and dereferencing the local would require an explicit null check (SpotBugs);
    // returning the blocked result directly avoids dereferencing a possibly-null value.
    return client.queryWithResponse(queryOptions)
        .map(response -> (Response<InputStream>) new ResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(), new FluxInputStream(response.getValue()),
            response.getDeserializedHeaders()))
        .block();
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Convenience overload: wraps the expression and destination stream in the options bag.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Write every result buffer into the caller-supplied OutputStream as it arrives, then
    // surface the query response once the stream of buffers completes.
    Mono<BlobQueryResponse> queryResult = client
        .queryWithResponse(queryOptions, context)
        .flatMap(asyncResponse -> asyncResponse.getValue()
            .reduce(queryOptions.getOutputStream(), (destination, buffer) -> {
                try {
                    destination.write(FluxUtil.byteBufferToArray(buffer));
                    return destination;
                } catch (IOException ex) {
                    // Surface write failures through the reactive chain as an unchecked I/O error.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(asyncResponse)));
    return blockWithOptionalTimeout(queryResult, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions or timeout; unwraps the response value.
    return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Apply the immutability policy via the async client, blocking up to the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // Convenience overload: no timeout. The previous trailing .getValue() on the
    // Response<Void> was dead code (a void method discarding a void value) and is removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
    // Remove the immutability policy via the async client, blocking up to the optional timeout.
    return blockWithOptionalTimeout(client.deleteImmutabilityPolicyWithResponse(context), timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Convenience overload: no timeout; unwraps the response value.
    return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
    // Set or clear the legal hold via the async client, blocking up to the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} | class BlobClientBase {
// Class-scoped logger used when mapping failures into logged exceptions.
private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
// Backing async client; every synchronous operation in this class delegates to it and blocks.
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
    // Wrap the given async client; all sync operations delegate to it.
    this.client = client;
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
 *
 * @param snapshot the identifier for a specific snapshot of this blob
 * @return a {@link BlobClientBase} used to interact with the specific snapshot.
 */
public BlobClientBase getSnapshotClient(String snapshot) {
    BlobAsyncClientBase snapshotAsyncClient = this.client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
 *
 * @param versionId the identifier for a specific version of this blob,
 * pass {@code null} to interact with the latest blob version.
 * @return a {@link BlobClientBase} used to interact with the specific version.
 */
public BlobClientBase getVersionClient(String versionId) {
    BlobAsyncClientBase versionAsyncClient = this.client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} that uses the specified {@code encryptionScope}.
 *
 * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
 * @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
 */
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    BlobAsyncClientBase scopedAsyncClient = this.client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
 * Creates a new {@link BlobClientBase} that uses the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
 */
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    BlobAsyncClientBase cpkAsyncClient = this.client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
 * Gets the URL of the storage account this blob belongs to.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Pure delegation; the async client holds the endpoint configuration.
    return this.client.getAccountUrl();
}
/**
 * Gets the URL of the blob represented by this client.
 *
 * @return the URL.
 */
public String getBlobUrl() {
    // Pure delegation; the async client holds the endpoint configuration.
    return this.client.getBlobUrl();
}
/**
 * Gets the name of the storage account associated with this storage resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    // Pure delegation to the wrapped async client.
    return this.client.getAccountName();
}
/**
 * Gets the name of the container that holds this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
 *
 * @return The name of the container.
 */
public final String getContainerName() {
    // Pure delegation to the wrapped async client.
    return this.client.getContainerName();
}
/**
 * Gets a client pointing to the parent container of this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
 *
 * @return {@link BlobContainerClient}
 */
public BlobContainerClient getContainerClient() {
    // Reuse the async client's builder so pipeline and configuration carry over.
    return this.client.getContainerClientBuilder().buildClient();
}
/**
 * Decodes and gets the name of this blob.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
 *
 * @return The decoded name of the blob.
 */
public final String getBlobName() {
    // Pure delegation to the wrapped async client.
    return this.client.getBlobName();
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    // Pure delegation to the wrapped async client.
    return this.client.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    // Pure delegation to the wrapped async client.
    return this.client.getCustomerProvidedKey();
}
/**
 * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption.
 */
String getEncryptionScope() {
    // Package-private accessor; pure delegation to the wrapped async client.
    return this.client.getEncryptionScope();
}
/**
 * Gets the service version this client is configured to use.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    // Pure delegation to the wrapped async client.
    return this.client.getServiceVersion();
}
/**
 * Gets the snapshotId for a blob resource.
 *
 * @return A string that represents the snapshotId of the snapshot blob
 */
public String getSnapshotId() {
    // Pure delegation to the wrapped async client.
    return this.client.getSnapshotId();
}
/**
 * Gets the versionId for a blob resource.
 *
 * @return A string that represents the versionId of the snapshot blob
 */
public String getVersionId() {
    // Pure delegation to the wrapped async client.
    return this.client.getVersionId();
}
/**
 * Determines whether this client points at a snapshot of a blob.
 *
 * @return A boolean that indicates if a blob is a snapshot
 */
public boolean isSnapshot() {
    // Pure delegation to the wrapped async client.
    return this.client.isSnapshot();
}
/**
 * Opens a blob input stream to download the blob.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream() {
    // No range and no request conditions: stream the entire blob.
    return openInputStream(null, null);
}
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param range {@link BlobRange}
 * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
 * blob.
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    BlobInputStreamOptions options = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(options);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the WithResponse overload with no timeout and the default context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async client's call, honoring the optional timeout.
    return blockWithOptionalTimeout(this.client.existsWithResponse(context), timeout);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param priority {@link RehydratePriority} for rehydrating the blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options-bag overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token. If the
 * source is in the same account, the Shared Key authorization on the destination will also be applied to the
 * source. The source URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobBeginCopyOptions}
 * @return A {@link SyncPoller} to poll the progress of blob copy operation.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
    // Wrap the async poller in its synchronous counterpart.
    return this.client.beginCopy(options).getSyncPoller();
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
    // No lease, no timeout, default context; response is intentionally discarded.
    abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
 * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob">Azure Docs</a></p>
 *
 * @param copyId The id of the copy operation to abort.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Block on the async client's call, honoring the optional timeout.
    Mono<Response<Void>> response = this.client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Delegate to the WithResponse overload with all optional parameters left null.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
 * @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
 * any metadata key or value, it must be removed or encoded.
 * @param tier {@link AccessTier} for the destination blob.
 * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
 * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
 * related to when the blob was changed relative to the given request. The request will fail if the specified
 * condition is not satisfied.
 * @param destRequestConditions {@link BlobRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Translate the flat parameter list into the options-bag overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
 * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
 * <p>
 * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
 * attached. The URL must be URL encoded.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/copy-blob-from-url">Azure Docs</a></p>
 *
 * @param options {@link BlobCopyFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The copy ID for the long running operation.
 * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async client's call, honoring the optional timeout.
    return blockWithOptionalTimeout(this.client.copyFromUrlWithResponse(options, context), timeout);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.</p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    downloadStream(stream);
}
/**
 * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
    // Full blob (null range), no retry options/conditions, no MD5, no timeout.
    downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
 * {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data. Use a streaming download for larger blobs.</p>
 *
 * @return The content of the blob.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // Block indefinitely (no timeout) on the async download.
    Mono<BinaryData> download = this.client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method will be deprecated in the future.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
 * BlobRequestConditions, boolean, Duration, Context)} instead.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    return downloadStreamWithResponse(stream, range,
        options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
 * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("stream", stream);
    // Fold each downloaded buffer into the caller's stream; the reduce keeps buffers ordered
    // and only completes once the body flux terminates. IOExceptions are rethrown unchecked
    // so they propagate through the reactive pipeline.
    Mono<BlobDownloadResponse> download = client
        .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
        .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobDownloadResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads a range of bytes from a blob into memory. Uploading data must be done from the {@link
 * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * <p>This method supports downloads up to 2GB of data.
 * Use {@link #downloadStreamWithResponse(OutputStream, BlobRange,
 * DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
 *
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
    DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Collect the streaming body into a single BinaryData, preserving the original
    // response's request, status, headers, and deserialized headers.
    Mono<BlobDownloadContentResponse> download = client
        .downloadStreamWithResponse(null, options, requestConditions, false, context)
        .flatMap(r ->
            BinaryData.fromFlux(r.getValue())
                .map(data ->
                    new BlobDownloadContentAsyncResponse(
                        r.getRequest(), r.getStatusCode(),
                        r.getHeaders(), data,
                        r.getDeserializedHeaders())
                ))
        .map(BlobDownloadContentResponse::new);
    return blockWithOptionalTimeout(download, timeout);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
    // Default behavior: never overwrite an existing file.
    return downloadToFile(filePath, false);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether or not to overwrite the file, should the file exist.
 * @return The blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // A null option set keeps the default create-new semantics; an explicit set
    // of CREATE + TRUNCATE_EXISTING + READ + WRITE enables overwriting.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
    return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
        requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link BlobRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link BlobRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options to the common type with defaults applied.
    final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    // Translate the flat parameter list into the options-bag overload.
    BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(finalParallelTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(downloadOptions, timeout, context);
}
/**
 * Downloads the entire blob into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link BlobDownloadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the blob properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async client's call, honoring the optional timeout.
    return blockWithOptionalTimeout(this.client.downloadToFileWithResponse(options, context), timeout);
}
/**
 * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
 * {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
 * {@code DeleteSnapshotsOptionType} to INCLUDE.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-blob">Azure Docs</a></p>
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
    // Convenience overload: no request conditions or timeout; unwrap the response payload.
    Response<BlobProperties> response = getPropertiesWithResponse(null, null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getPropertiesWithResponse(requestConditions, context), timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
    // Convenience overload: no request conditions, no timeout.
    setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setHttpHeadersWithResponse(headers, requestConditions, context), timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
    // Convenience overload: no request conditions, no timeout.
    setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setMetadataWithResponse(metadata, requestConditions, context), timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
    // Convenience overload: default options, no timeout; unwrap the response payload.
    Response<Map<String, String>> response = this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getTagsWithResponse(options, context), timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
    // Convenience overload: wrap the tags in the options bag; no timeout.
    this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setTagsWithResponse(options, context), timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
    // Convenience overload: no metadata, no request conditions, no timeout; unwrap the snapshot client.
    Response<BlobClientBase> response = createSnapshotWithResponse(null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Wrap the async snapshot client in a synchronous BlobClientBase before blocking on the result.
    Mono<Response<BlobClientBase>> snapshotResponse = client
        .createSnapshotWithResponse(metadata, requestConditions, context)
        .map(asyncResponse -> new SimpleResponse<>(asyncResponse, new BlobClientBase(asyncResponse.getValue())));
    return blockWithOptionalTimeout(snapshotResponse, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
    // Convenience overload: no rehydrate priority, no lease, no timeout.
    setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
    Duration timeout, Context context) {
    // Translate the individual parameters into the options-bag overload.
    BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(tier)
        .setPriority(priority)
        .setLeaseId(leaseId);
    return setAccessTierWithResponse(tierOptions, timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    Mono<Response<Void>> response = client.setTierWithResponse(options, context);
    return blockWithOptionalTimeout(response, timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
    // Convenience overload: no timeout.
    undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.undeleteWithResponse(context), timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
    // Convenience overload: no timeout; unwrap the response payload.
    Response<StorageAccountInfo> response = getAccountInfoWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.getAccountInfoWithResponse(context), timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Pure delegation; SAS generation happens client-side (no service call).
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Pure delegation; SAS generation happens client-side (no service call).
    return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
        context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Pure delegation; signing uses the shared key credential held by the async client.
    return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Pure delegation; signing uses the shared key credential held by the async client.
    return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: build default query options from the expression and unwrap the stream.
    Response<InputStream> response = openQueryInputStreamWithResponse(new BlobQueryOptions(expression));
    return response.getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
    /* Build the synchronous Response inside the reactive chain and only block() at the very end.
     * Returning the block() result directly means the @Nullable intermediate is never dereferenced,
     * so the previous explicit null check and IllegalStateException are no longer needed. */
    Mono<Response<InputStream>> responseMono = client.queryWithResponse(queryOptions)
        .map(response -> new ResponseBase<>(response.getRequest(), response.getStatusCode(),
            response.getHeaders(), new FluxInputStream(response.getValue()),
            response.getDeserializedHeaders()));
    return responseMono.block();
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
    // Convenience overload: bundle the stream and expression into query options; no timeout.
    queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", queryOptions);
    StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
    // Write each downloaded buffer into the caller-supplied OutputStream, then surface the service response.
    Mono<BlobQueryResponse> download = client
        .queryWithResponse(queryOptions, context)
        // reduce() consumes the body sequentially, so the output stream never sees out-of-order data.
        .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
            try {
                outputStream.write(FluxUtil.byteBufferToArray(buffer));
                return outputStream;
            } catch (IOException ex) {
                // Propagate as unchecked so the failure terminates the reactive pipeline.
                throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
            }
        }).thenReturn(new BlobQueryResponse(response)));
    return blockWithOptionalTimeout(download, timeout);
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
    // Convenience overload: no request conditions, no timeout; unwrap the response payload.
    Response<BlobImmutabilityPolicy> response =
        setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
    BlobRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(
        client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions, context), timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
    // The response carries no payload (Response<Void>), so the previous trailing getValue() was a no-op
    // and has been removed.
    deleteImmutabilityPolicyWithResponse(null, Context.NONE);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
    // Convenience overload: no timeout; unwrap the response payload.
    Response<BlobLegalHoldResult> response = setLegalHoldWithResponse(legalHold, null, Context.NONE);
    return response.getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout,
    Context context) {
    // Delegate to the async client and block on the result, honoring the optional timeout.
    return blockWithOptionalTimeout(client.setLegalHoldWithResponse(legalHold, context), timeout);
}
} |
nit: Change throws in the reactive stream to `Mono.error` returns. `FluxUtil.monoError` will be useful here | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.map(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
throw logger.logExceptionAsError(
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties);
}).block();
} | throw logger.logExceptionAsError( | public BlobInputStream openInputStream(BlobInputStreamOptions options) {
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadWithResponse(chunkRange, null, conditions, false);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(logger,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(logger, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties));
}).block();
} | class BlobClientBase {
    // Logger scoped to this class; errors are routed through it before being thrown or signaled.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);

    // Backing async client; every synchronous operation in this class delegates to it and blocks.
    private final BlobAsyncClientBase client;

    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client this synchronous client wraps
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
    /**
     * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse}
     *
     * <p>For more information, see the Azure Docs.</p>
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("stream", stream);
        // Reduce the async byte-buffer stream into the caller's OutputStream, then surface the original
        // response (status/headers) only after the body has been fully written.
        Mono<BlobDownloadResponse> download = client
            .downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Wrap as unchecked so the IOException can cross the reactive boundary; propagate
                    // keeps the original cause attached.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
    /**
     * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse}
     *
     * <p>For more information, see the Azure Docs.</p>
     *
     * <p>This method supports downloads up to 2GB of data; use a streaming download for larger blobs.</p>
     *
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobDownloadContentResponse downloadContentWithResponse(
        DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
        // Collect the full body into BinaryData (hence the 2GB limit), then rebuild the response
        // around the buffered content so headers and deserialized headers are preserved.
        Mono<BlobDownloadContentResponse> download = client
            .downloadStreamWithResponse(null, options, requestConditions, false, context)
            .flatMap(r ->
                BinaryData.fromFlux(r.getValue())
                    .map(data ->
                        new BlobDownloadContentAsyncResponse(
                            r.getRequest(), r.getStatusCode(),
                            r.getHeaders(), data,
                            r.getDeserializedHeaders())
                    ))
            .map(BlobDownloadContentResponse::new);
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
    /**
     * Deletes the specified blob or snapshot. To delete a blob with its snapshots use
     * {@link
     * {@code DeleteSnapshotsOptionType} to INCLUDE.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
     *
     * <p>For more information, see the
     * <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void delete() {
        // Delegate with all-default arguments: no snapshot option, no request conditions, no timeout.
        deleteWithResponse(null, null, null, Context.NONE);
    }
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return The blob properties and metadata.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobProperties getProperties() {
        // Delegate with no request conditions or timeout, unwrapping the response value.
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param headers {@link BlobHttpHeaders}
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setHttpHeaders(BlobHttpHeaders headers) {
        // Delegate with no request conditions or timeout; the response is intentionally discarded.
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setMetadata(Map<String, String> metadata) {
        // Delegate with no request conditions or timeout; the response is intentionally discarded.
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the blob's tags.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return The blob's tags.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Map<String, String> getTags() {
        // Delegate with default options and no timeout, unwrapping the response value.
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param tags Tags to associate with the blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setTags(Map<String, String> tags) {
        // Delegate with no timeout; the response is intentionally discarded.
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
     * {@link BlobClientBase
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobClientBase createSnapshot() {
        // Delegate with no metadata, request conditions, or timeout, unwrapping the snapshot client.
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @param tier The new tier for the blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setAccessTier(AccessTier tier) {
        // Delegate with no rehydrate priority, lease, or timeout; the response is discarded.
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
     *
     * <p>For more information, see the
     * <a href="https:
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void undelete() {
        // Delegate with no timeout; the response is intentionally discarded.
        undeleteWithResponse(null, Context.NONE);
    }
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * @return The sku name and account kind.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public StorageAccountInfo getAccountInfo() {
        // Delegate with no timeout, unwrapping the response value.
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
     * See {@link BlobServiceClient
     * how to get a user delegation key.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey) {
        // Pure delegation; SAS generation is local signing and involves no network call.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
    }
    /**
     * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
     * See {@link BlobServiceClient
     * how to get a user delegation key.
     * @param accountName The account name.
     * @param context Additional context that is passed through the code when generating a SAS.
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
        UserDelegationKey userDelegationKey, String accountName, Context context) {
        // Pure delegation; SAS generation is local signing and involves no network call.
        return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
            context);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
     * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
        // Pure delegation; SAS generation is local signing and involves no network call.
        return this.client.generateSas(blobServiceSasSignatureValues);
    }
    /**
     * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
     * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
     * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
     *
     * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
     * @param context Additional context that is passed through the code when generating a SAS.
     *
     * @return A {@code String} representing the SAS query parameters.
     */
    public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
        // Pure delegation; SAS generation is local signing and involves no network call.
        return this.client.generateSas(blobServiceSasSignatureValues, context);
    }
    /**
     * Opens a blob input stream to query the blob.
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
     *
     * @param expression The query expression.
     * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public InputStream openQueryInputStream(String expression) {
        // Delegate with default options built from the expression, unwrapping the stream.
        return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
    }
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param expression The query expression.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void query(OutputStream stream, String expression) {
        // Delegate with default options, no timeout, and an empty context.
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
     *
     * @param queryOptions {@link BlobQueryOptions The query options}.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce the body Flux into the caller-supplied OutputStream, writing each buffer
        // as it arrives; IOExceptions are rethrown as UncheckedIOException through Reactor.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }
    /**
     * Sets the immutability policy on a blob, blob snapshot or blob version.
     * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
     * immutable storage with versioning enabled to call this API.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
     *
     * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
     * @return The immutability policy.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
        // Delegate with no request conditions or timeout, unwrapping the response value.
        return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
    /**
     * Sets a legal hold on the blob.
     * <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
     * immutable storage with versioning enabled to call this API.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
     *
     * @param legalHold Whether or not you want a legal hold on the blob.
     * @return The legal hold result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobLegalHoldResult setLegalHold(boolean legalHold) {
        // Delegate with no timeout, unwrapping the response value.
        return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
    }
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} | class BlobClientBase {
    // Logger for surfacing mapped exceptions from this class.
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Underlying async client; every synchronous method delegates to it and blocks.
    private final BlobAsyncClientBase client;
    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client to which all synchronous operations delegate
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
    // Wrap the snapshot-scoped async client in a new synchronous facade.
    BlobAsyncClientBase snapshotAsyncClient = client.getSnapshotClient(snapshot);
    return new BlobClientBase(snapshotAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
    // Wrap the version-scoped async client in a new synchronous facade.
    BlobAsyncClientBase versionAsyncClient = client.getVersionClient(versionId);
    return new BlobClientBase(versionAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
    // Wrap the encryption-scope-configured async client in a new synchronous facade.
    BlobAsyncClientBase scopedAsyncClient = client.getEncryptionScopeAsyncClient(encryptionScope);
    return new BlobClientBase(scopedAsyncClient);
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Wrap the CPK-configured async client in a new synchronous facade.
    BlobAsyncClientBase cpkAsyncClient = client.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    return new BlobClientBase(cpkAsyncClient);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
// Delegates to the wrapped async client.
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
// Delegates to the wrapped async client.
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
// Delegates to the wrapped async client.
return client.getAccountName();
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName}
*
* @return The name of the container.
*/
public final String getContainerName() {
// Delegates to the wrapped async client.
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient}
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
// Builds a new synchronous container client from the async client's container builder.
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName}
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
// Delegates to the wrapped async client.
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
// Delegates to the wrapped async client.
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
// Delegates to the wrapped async client.
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
String getEncryptionScope() {
// Package-private accessor; delegates to the wrapped async client.
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
// Delegates to the wrapped async client.
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
// Delegates to the wrapped async client.
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the snapshot blob
*/
public String getVersionId() {
// Delegates to the wrapped async client.
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
// Delegates to the wrapped async client.
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
* <p>
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream() {
// Null range/conditions -> stream the entire blob with default access conditions.
return openInputStream(null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
* <p>
*
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Adapt the legacy parameter pair onto the options-based overload.
    BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
        .setRange(range)
        .setRequestConditions(requestConditions);
    return openInputStream(streamOptions);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // No timeout and the default context for the simple overload.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    // Block on the async existence check, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.existsWithResponse(context), timeout);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
    // All optional copy parameters default to null; only the poll interval is forwarded.
    return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
    RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
    BlobRequestConditions destRequestConditions, Duration pollInterval) {
    // Translate the flat parameter list into the options bag consumed by the canonical overload.
    BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
        .setMetadata(metadata)
        .setTier(tier)
        .setRehydratePriority(priority)
        .setSourceRequestConditions(
            ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
        .setDestinationRequestConditions(destRequestConditions)
        .setPollInterval(pollInterval);
    return this.beginCopy(copyOptions);
}
/**
* Copies the data at the source URL to a blob.
* <p>
* This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
// Expose the async copy poller as a blocking SyncPoller.
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
// No lease, no timeout, default context for the simple overload.
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
    Context context) {
    // Name the async call before blocking on it with the optional timeout.
    Mono<Response<Void>> abort = client.abortCopyFromUrlWithResponse(copyId, leaseId, context);
    return blockWithOptionalTimeout(abort, timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
    // Defaults for all optional parameters; unwrap the copy id from the response.
    Response<String> response = copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
    RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
    Duration timeout, Context context) {
    // Bundle the flat parameters into the options type consumed by the canonical overload.
    BlobCopyFromUrlOptions copyOptions = new BlobCopyFromUrlOptions(copySource)
        .setMetadata(metadata)
        .setTier(tier)
        .setSourceRequestConditions(sourceModifiedRequestConditions)
        .setDestinationRequestConditions(destRequestConditions);
    return this.copyFromUrlWithResponse(copyOptions, timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* <p>
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
    Context context) {
    // Block on the async copy, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.copyFromUrlWithResponse(options, context), timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future. Use {@link
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
// Kept for backward compatibility; downloadStream is the preferred entry point.
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
// Full-blob download: no range, retry options, request conditions, or MD5 validation.
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.BlobClient.downloadContent}
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
    // This overload exposes no caller-supplied timeout, hence the null bound.
    Mono<BinaryData> download = client.downloadContent();
    return blockWithOptionalTimeout(download, null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method will be deprecated in the future.
* Use {@link
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
    DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
    Duration timeout, Context context) {
    // Kept for backward compatibility; forwards unchanged to downloadStreamWithResponse.
    return downloadStreamWithResponse(stream, range, options, requestConditions, getRangeContentMd5, timeout,
        context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
// Fail fast on a null destination rather than inside the reactive chain.
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
// Fold the body Flux into the caller's OutputStream, writing each buffer as it arrives.
.flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
try {
outputStream.write(FluxUtil.byteBufferToArray(buffer));
return outputStream;
} catch (IOException ex) {
// Wrap I/O failures as unchecked so they propagate through the reactive pipeline.
throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
}
}).thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* <p>This method supports downloads up to 2GB of data.
* Use {@link
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.</p>
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
// Stream the whole blob (null range, no MD5) and buffer it in memory as BinaryData.
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
// Collect the body Flux, preserving the original response's request/status/headers.
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
// Default behavior: fail if the destination file already exists (overwrite = false).
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether or not to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
    // When overwriting, open the file so an existing one is truncated instead of
    // triggering a FileAlreadyExistsException; otherwise keep the default (create-new) options.
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.WRITE);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    }
    Response<BlobProperties> response = downloadToFileWithResponse(filePath, null, null, null, null, false,
        overwriteOptions, null, Context.NONE);
    return response.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
// Null OpenOptions -> default create-new file semantics in the delegated overload.
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Normalize the blob-specific transfer options into the common type before building the options bag.
    final com.azure.storage.common.ParallelTransferOptions commonTransferOptions =
        ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
    BlobDownloadToFileOptions fileOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(range)
        .setParallelTransferOptions(commonTransferOptions)
        .setDownloadRetryOptions(downloadRetryOptions)
        .setRequestConditions(requestConditions)
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return downloadToFileWithResponse(fileOptions, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
    Context context) {
    // Block on the async file download, bounded by the optional timeout.
    return blockWithOptionalTimeout(client.downloadToFileWithResponse(options, context), timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
// No snapshot option, conditions, or timeout for the simple overload.
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
* Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
*
* <p>For more information, see the
* <a href="https:
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
Context context) {
Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags
*
* <p>For more information, see the
* <a href="https:
*
* @param tags Tags to associate with the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setTags(Map<String, String> tags) {
this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
}
/**
* Sets user defined tags. The specified tags in this method will replace existing tags. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
*
* <p>For more information, see the
* <a href="https:
*
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobClientBase createSnapshot() {
return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Creates a read-only snapshot of the blob.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
* {@link BlobClientBase
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobClientBase>> response = client
.createSnapshotWithResponse(metadata, requestConditions, context)
.map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessTier(AccessTier tier) {
setAccessTierWithResponse(tier, null, null, null, Context.NONE);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param tier The new tier for the blob.
* @param priority Optional priority to set for re-hydrating blobs.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
Duration timeout, Context context) {
return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
timeout, context);
}
/**
* Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
* a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
* the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
* etag.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link BlobSetAccessTierOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options,
Duration timeout, Context context) {
return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
*
* <p>For more information, see the
* <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void undelete() {
undeleteWithResponse(null, Context.NONE);
}
/**
* Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.undeleteWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
*
* <p>For more information, see the
* <a href="https:
*
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public StorageAccountInfo getAccountInfo() {
return getAccountInfoWithResponse(null, Context.NONE).getValue();
}
/**
* Returns the sku name and account kind for the account.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The sku name and account kind.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) {
Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey);
}
/**
* Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
UserDelegationKey userDelegationKey, String accountName, Context context) {
return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName,
context);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
return this.client.generateSas(blobServiceSasSignatureValues);
}
/**
* Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
return this.client.generateSas(blobServiceSasSignatureValues, context);
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public InputStream openQueryInputStream(String expression) {
return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue();
}
/**
* Opens a blob input stream to query the blob.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream
*
* @param queryOptions {@link BlobQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();
if (response == null) {
throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire blob into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void query(OutputStream stream, String expression) {
queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
}
    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https:
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse
     *
     * @param queryOptions {@link BlobQueryOptions The query options}. Must be non-null and carry a non-null
     * output stream.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        // Validate up front so failures surface before any network work is scheduled.
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            // Drain the result body into the caller-supplied stream; reduce() keeps the writes sequential.
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    // Surface I/O failures as unchecked so they propagate through the reactive pipeline.
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobQueryResponse(response)));
        // Block for the whole download, honoring the optional timeout.
        return blockWithOptionalTimeout(download, timeout);
    }
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @return The immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immutabilityPolicy) {
return setImmutabilityPolicyWithResponse(immutabilityPolicy, null, null, Context.NONE).getValue();
}
/**
* Sets the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse
*
* @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobImmutabilityPolicy> setImmutabilityPolicyWithResponse(BlobImmutabilityPolicy immutabilityPolicy,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<BlobImmutabilityPolicy>> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy}
*
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteImmutabilityPolicy() {
deleteImmutabilityPolicyWithResponse(null, Context.NONE).getValue();
}
/**
* Delete the immutability policy on a blob, blob snapshot or blob version.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the immutability policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteImmutabilityPolicyWithResponse(Duration timeout, Context context) {
Mono<Response<Void>> response = client.deleteImmutabilityPolicyWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @return The legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobLegalHoldResult setLegalHold(boolean legalHold) {
return setLegalHoldWithResponse(legalHold, null, Context.NONE).getValue();
}
/**
* Sets a legal hold on the blob.
* <p> NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with
* immutable storage with versioning enabled to call this API.</p>
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse
*
* @param legalHold Whether or not you want a legal hold on the blob.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the legal hold result.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobLegalHoldResult> setLegalHoldWithResponse(boolean legalHold, Duration timeout, Context context) {
Mono<Response<BlobLegalHoldResult>> response = client.setLegalHoldWithResponse(legalHold, context);
return blockWithOptionalTimeout(response, timeout);
}
} |
why not AzureCredentialType.NAMED_KEY_CREDENTIAL here? | public AzureCredentialType azureCredentialType() {
return AzureCredentialType.KEY_CREDENTIAL;
} | return AzureCredentialType.KEY_CREDENTIAL; | public AzureCredentialType azureCredentialType() {
return AzureCredentialType.NAMED_KEY_CREDENTIAL;
} | class NamedKeyAuthenticationDescriptor implements AuthenticationDescriptor<AzureNamedKeyCredentialProvider> {
private final Consumer<AzureNamedKeyCredentialProvider> consumer;
public NamedKeyAuthenticationDescriptor(Consumer<AzureNamedKeyCredentialProvider> consumer) {
this.consumer = consumer;
}
@Override
@Override
public AzureCredentialResolver<AzureNamedKeyCredentialProvider> azureCredentialResolver() {
return new AzureNamedKeyCredentialResolver();
}
@Override
public Consumer<AzureNamedKeyCredentialProvider> consumer() {
return consumer;
}
} | class NamedKeyAuthenticationDescriptor implements AuthenticationDescriptor<AzureNamedKeyCredentialProvider> {
private final Consumer<AzureNamedKeyCredentialProvider> consumer;
public NamedKeyAuthenticationDescriptor(Consumer<AzureNamedKeyCredentialProvider> consumer) {
this.consumer = consumer;
}
@Override
@Override
public AzureCredentialResolver<AzureNamedKeyCredentialProvider> azureCredentialResolver() {
return new AzureNamedKeyCredentialResolver();
}
@Override
public Consumer<AzureNamedKeyCredentialProvider> consumer() {
return consumer;
}
} |
may namedKey throw npe here? | public AzureNamedKeyCredentialProvider resolve(AzureProperties properties) {
if (!isResolvable(properties)) {
return null;
}
NamedKeyProperties namedKey = ((NamedKeyAware) properties).getNamedKey();
if (!StringUtils.hasText(namedKey.getName()) || !StringUtils.hasText(namedKey.getKey())) {
return null;
}
return new AzureNamedKeyCredentialProvider(namedKey.getName(), namedKey.getKey());
} | if (!StringUtils.hasText(namedKey.getName()) || !StringUtils.hasText(namedKey.getKey())) { | public AzureNamedKeyCredentialProvider resolve(AzureProperties properties) {
if (!isResolvable(properties)) {
return null;
}
NamedKeyProperties namedKey = ((NamedKeyAware) properties).getNamedKey();
if (namedKey == null || !StringUtils.hasText(namedKey.getName()) || !StringUtils.hasText(namedKey.getKey())) {
return null;
}
return new AzureNamedKeyCredentialProvider(namedKey.getName(), namedKey.getKey());
} | class AzureNamedKeyCredentialResolver implements AzureCredentialResolver<AzureNamedKeyCredentialProvider> {
@Override
@Override
public boolean isResolvable(AzureProperties properties) {
return properties instanceof NamedKeyAware;
}
} | class AzureNamedKeyCredentialResolver implements AzureCredentialResolver<AzureNamedKeyCredentialProvider> {
@Override
@Override
public boolean isResolvable(AzureProperties properties) {
return properties instanceof NamedKeyAware;
}
} |
just return true? | public boolean isResolvable(AzureProperties properties) {
return true;
} | return true; | public boolean isResolvable(AzureProperties properties) {
return true;
} | class AzureTokenCredentialResolver implements AzureCredentialResolver<AzureTokenCredentialProvider> {
@Override
public AzureTokenCredentialProvider resolve(AzureProperties properties) {
final TokenCredentialProperties credential = properties.getCredential();
if (credential == null) {
return null;
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(
credential.getClientId()) && StringUtils.hasText(credential.getClientSecret())) {
final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder()
.clientId(credential.getClientId())
.clientSecret(credential.getClientSecret())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientSecretCredential);
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(credential.getCertificatePath())) {
final ClientCertificateCredential clientCertificateCredential = new ClientCertificateCredentialBuilder()
.clientId(credential.getClientId())
.pemCertificate(credential.getCertificatePath())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientCertificateCredential);
}
return null;
}
/**
* All SDKs will support this type.
*
* @param properties Azure properties
* @return Resolvable or not
*/
@Override
} | class AzureTokenCredentialResolver implements AzureCredentialResolver<AzureTokenCredentialProvider> {
@Override
public AzureTokenCredentialProvider resolve(AzureProperties properties) {
final TokenCredentialProperties credential = properties.getCredential();
if (credential == null) {
return null;
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(
credential.getClientId()) && StringUtils.hasText(credential.getClientSecret())) {
final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder()
.clientId(credential.getClientId())
.clientSecret(credential.getClientSecret())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientSecretCredential);
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(credential.getCertificatePath())) {
final ClientCertificateCredential clientCertificateCredential = new ClientCertificateCredentialBuilder()
.clientId(credential.getClientId())
.pemCertificate(credential.getCertificatePath())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientCertificateCredential);
}
return null;
}
/**
* All SDKs will support this type.
*
* @param properties Azure properties
* @return Resolvable or not
*/
@Override
} |
Yes, all azure properties could be resolved. | public boolean isResolvable(AzureProperties properties) {
return true;
} | return true; | public boolean isResolvable(AzureProperties properties) {
return true;
} | class AzureTokenCredentialResolver implements AzureCredentialResolver<AzureTokenCredentialProvider> {
@Override
public AzureTokenCredentialProvider resolve(AzureProperties properties) {
final TokenCredentialProperties credential = properties.getCredential();
if (credential == null) {
return null;
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(
credential.getClientId()) && StringUtils.hasText(credential.getClientSecret())) {
final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder()
.clientId(credential.getClientId())
.clientSecret(credential.getClientSecret())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientSecretCredential);
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(credential.getCertificatePath())) {
final ClientCertificateCredential clientCertificateCredential = new ClientCertificateCredentialBuilder()
.clientId(credential.getClientId())
.pemCertificate(credential.getCertificatePath())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientCertificateCredential);
}
return null;
}
/**
* All SDKs will support this type.
*
* @param properties Azure properties
* @return Resolvable or not
*/
@Override
} | class AzureTokenCredentialResolver implements AzureCredentialResolver<AzureTokenCredentialProvider> {
@Override
public AzureTokenCredentialProvider resolve(AzureProperties properties) {
final TokenCredentialProperties credential = properties.getCredential();
if (credential == null) {
return null;
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(
credential.getClientId()) && StringUtils.hasText(credential.getClientSecret())) {
final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder()
.clientId(credential.getClientId())
.clientSecret(credential.getClientSecret())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientSecretCredential);
}
if (StringUtils.hasText(credential.getTenantId()) && StringUtils.hasText(credential.getCertificatePath())) {
final ClientCertificateCredential clientCertificateCredential = new ClientCertificateCredentialBuilder()
.clientId(credential.getClientId())
.pemCertificate(credential.getCertificatePath())
.tenantId(credential.getTenantId())
.build();
return new AzureTokenCredentialProvider(clientCertificateCredential);
}
return null;
}
/**
* All SDKs will support this type.
*
* @param properties Azure properties
* @return Resolvable or not
*/
@Override
} |
why add null if no custom entities actions are supplied by the user? | private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
} | customEntitiesTasks.add(null); | private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
} | class AnalyzeActionsAsyncClient {
// Wire names of the per-action task arrays as they appear in the service's error "target"
// references (e.g. "#/tasks/entityRecognitionTasks/0"); used to route job-level errors back
// to the action result they belong to.
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
// NOTE(review): constant name says SINGLE but the wire value is "customClassificationTasks" —
// presumably the service's actual property name; confirm against the REST API swagger.
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
// Regex matching an error target of the form "#/tasks/<taskArrayName>/<index>".
// NOTE(review): the format-string literal below appears truncated/garbled in this extraction
// (the pattern text and closing quote are missing) — restore from source control before use.
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
// Auto-generated protocol-layer client that performs the actual HTTP calls.
private final TextAnalyticsClientImpl service;
// Compiled once (Pattern is thread-safe) and reused for every error-target parse.
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
// Package-private: constructed by the public client builder, not by SDK users.
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
/**
 * Submits the documents together with the configured actions as a single long-running analyze
 * job and returns a {@link PollerFlux} that tracks the job; on completion the poller yields a
 * paged flux over the per-action results.
 *
 * NOTE(review): near-duplicate of {@code beginAnalyzeActionsIterable} below — the two differ
 * only in the final-result type; consider extracting the shared setup into a helper.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
// Normalize nulls so the rest of the method never has to re-check.
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
// Assemble the job request: documents plus one task list per configured action type.
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
// Captured as effectively-final for use inside the lambdas below.
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
// Activation: submit the job and capture the operation id from the Operation-Location header.
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
// Polling: re-query job status until a terminal state is reached.
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
// The service exposes no cancel endpoint for this job type.
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
// Fetch: expose the finished job's results as a paged flux.
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
// Surface synchronous setup failures through the poller rather than throwing.
return PollerFlux.error(ex);
}
}
/**
 * Same long-running analyze-actions submission as {@code beginAnalyzeActions}, but the poller's
 * final result is the blocking {@link AnalyzeActionsResultPagedIterable} wrapper (used by the
 * synchronous client).
 *
 * NOTE(review): duplicated setup with the flux variant above — candidate for a shared helper.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
// Normalize nulls so the rest of the method never has to re-check.
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
// Assemble the job request: documents plus one task list per configured action type.
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
// Activation: submit the job and capture the operation id from the Operation-Location header.
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
// Polling: re-query job status until a terminal state is reached.
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
// The service exposes no cancel endpoint for this job type.
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
// Fetch: wrap the paged flux in the blocking iterable for the sync client.
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
// Surface synchronous setup failures through the poller rather than throwing.
return PollerFlux.error(ex);
}
}
/**
 * Translates the user-facing {@link TextAnalyticsActions} bundle into the service request's
 * {@link JobManifestTasks}. Only action groups the user actually configured are populated;
 * returns {@code null} when no actions object was supplied at all.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks tasks = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        tasks.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        tasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        tasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        tasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        tasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        tasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        tasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        tasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        tasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizeEntitiesAction} to a service {@link EntitiesTask}. A null action
 * produces a null list entry, so entry positions mirror the user's action list.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    // Java strings are UTF-16, so offsets are reported in UTF-16 code units.
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizePiiEntitiesAction} to a service {@link PiiTask}. A null action
 * produces a null list entry, so entry positions mirror the user's action list.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> tasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        // The optional domain filter travels as its string form; null means "no filter".
        final String domainValue =
            action.getDomainFilter() == null ? null : action.getDomainFilter().toString();
        tasks.add(new PiiTask()
            .setTaskName(action.getActionName())
            .setParameters(new PiiTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setDomain(PiiTaskParametersDomain.fromString(domainValue))
                // Java strings are UTF-16, so offsets are reported in UTF-16 code units.
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractKeyPhrasesAction} to a service {@link KeyPhrasesTask}. A null action
 * produces a null list entry, so entry positions mirror the user's action list.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        tasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizeLinkedEntitiesAction} to a service {@link EntityLinkingTask}. A null
 * action produces a null list entry, so entry positions mirror the user's action list.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> tasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        tasks.add(new EntityLinkingTask()
            .setTaskName(action.getActionName())
            .setParameters(new EntityLinkingTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setLoggingOptOut(action.isServiceLogsDisabled())
                // Java strings are UTF-16, so offsets are reported in UTF-16 code units.
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link AnalyzeSentimentAction} to a service {@link SentimentAnalysisTask}. A null
 * action produces a null list entry, so entry positions mirror the user's action list.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        tasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    // Java strings are UTF-16, so offsets are reported in UTF-16 code units.
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractSummaryAction} to a service {@link ExtractiveSummarizationTask}. A
 * null action produces a null list entry, so entry positions mirror the user's action list.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        // Optional ordering of extracted sentences; null means the service default.
        final ExtractiveSummarizationTaskParametersSortBy sortBy = action.getOrderBy() == null
            ? null
            : ExtractiveSummarizationTaskParametersSortBy.fromString(action.getOrderBy().toString());
        tasks.add(new ExtractiveSummarizationTask()
            .setTaskName(action.getActionName())
            .setParameters(new ExtractiveSummarizationTaskParameters()
                .setModelVersion(action.getModelVersion())
                // Java strings are UTF-16, so offsets are reported in UTF-16 code units.
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setSentenceCount(action.getMaxSentenceCount())
                .setSortBy(sortBy)));
    }
    return tasks;
}
/**
 * Maps each {@link SingleCategoryClassifyAction} to a service
 * {@link CustomSingleClassificationTask}. A null action produces a null list entry, so entry
 * positions mirror the user's action list.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> tasks = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    // Custom tasks address a trained project/deployment instead of a model version.
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link MultiCategoryClassifyAction} to a service
 * {@link CustomMultiClassificationTask}. A null action produces a null list entry, so entry
 * positions mirror the user's action list.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> tasks = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        tasks.add(new CustomMultiClassificationTask()
            .setTaskName(action.getActionName())
            .setParameters(new CustomMultiClassificationTaskParameters()
                // Custom tasks address a trained project/deployment instead of a model version.
                .setProjectName(action.getProjectName())
                .setDeploymentName(action.getDeploymentName())
                .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Wraps the job-submission mono as the poller's activation step, translating any failure into
 * the SDK's HTTP exception type (or a logged error for synchronous setup failures).
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return context -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's status-check step: looks up the operation id captured during activation,
 * queries the job state, and converts it into the next {@link PollResponse}.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return context -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> lastResponse = context.getLatestResponse();
            return pollingFunction.apply(lastResponse.getValue().getOperationId())
                .flatMap(jobStateResponse -> processAnalyzedModelResponse(jobStateResponse, lastResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's final-result step: hands the completed operation's id to the supplied
 * fetch function, which produces the paged result flux.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return context -> {
        try {
            return fetchingFunction.apply(context.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Iterable counterpart of {@code fetchingOperation}: produces the blocking paged iterable for
 * the synchronous client from the completed operation's id.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return context -> {
        try {
            return fetchingFunction.apply(context.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Creates a paged flux over a finished analyze job's results; each page request is forwarded to
 * {@link #getPage} with the continuation token supplied by the previous page.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (token, pageSize) -> getPage(token, operationId, top, skip, showStats, context).flux());
}
/**
 * Retrieves one page of analyze-job results. When a continuation token from a previous page is
 * present, the paging parameters are taken from it; otherwise the caller's values are used.
 *
 * Fix: the original called {@code continuationTokenMap.getOrDefault(showStats, false)}, passing
 * the autoboxed {@code boolean} parameter as the key of a String-keyed map — the lookup could
 * never hit, so statistics were silently disabled on every continuation request. The lookup now
 * uses a string key and falls back to the caller's {@code showStats}.
 * NOTE(review): "showStats" is assumed to be the key emitted by {@code parseNextLink} (the
 * service query parameter name) — confirm against parseNextLink's output.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    Integer topValue = top;
    Integer skipValue = skip;
    boolean showStatsValue = showStats;
    if (continuationToken != null) {
        // Continuation tokens carry the paging state of the next page; prefer their values.
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        final Object parsedShowStats = continuationTokenMap.get("showStats");
        if (parsedShowStats instanceof Boolean) {
            showStatsValue = (Boolean) parsedShowStats;
        }
    }
    // Single call site for both the first page and continuations (was duplicated).
    return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
        .map(this::toAnalyzeActionsResultPagedResponse)
        .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
/**
 * Wraps one job-status response as a single-element page: the converted
 * {@link AnalyzeActionsResult} plus the service's next-link for continuation.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        // Each status poll yields exactly one aggregate result element.
        Arrays.asList(toAnalyzeActionsResult(jobState)),
        jobState.getNextLink(),
        null);
}
/**
 * Converts a service {@link AnalyzeJobState} into the client-facing {@link AnalyzeActionsResult}:
 * copies every task item's document results into the matching action-result type, then walks the
 * job-level errors and marks the action each error's target refers to.
 *
 * Fixes: the three custom-task loops never propagated the task name — they now call
 * {@code setActionName(taskItem.getTaskName())} like the six built-in loops do. Index-based
 * loops whose index was only used for {@code get(i)} are replaced with enhanced-for.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
        tasksStateTasks.getCustomEntityRecognitionTasks();
    final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
        tasksStateTasks.getCustomSingleClassificationTasks();
    final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
        tasksStateTasks.getCustomMultiClassificationTasks();
    // Result accumulators; list positions correspond to the task indices the error targets use.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
    List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
        new ArrayList<>();
    List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
        new ArrayList<>();
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (TasksStateTasksEntityRecognitionTasksItem taskItem : entityRecognitionTasksItems) {
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (TasksStateTasksEntityRecognitionPiiTasksItem taskItem : piiTasksItems) {
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (TasksStateTasksKeyPhraseExtractionTasksItem taskItem : keyPhraseExtractionTasks) {
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (TasksStateTasksEntityLinkingTasksItem taskItem : linkedEntityRecognitionTasksItems) {
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (TasksStateTasksSentimentAnalysisTasksItem taskItem : sentimentAnalysisTasksItems) {
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (TasksStateTasksExtractiveSummarizationTasksItem taskItem : extractiveSummarizationTasksItems) {
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
        for (TasksStateTasksCustomEntityRecognitionTasksItem taskItem : customEntityRecognitionTasksItems) {
            final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
            final CustomEntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeCustomEntitiesResultCollection(results));
            }
            // Fix: action name was dropped for custom tasks; set it like the built-in tasks above.
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeCustomEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
        for (TasksStateTasksCustomSingleClassificationTasksItem taskItem : customSingleClassificationTasksItems) {
            final SingleCategoryClassifyActionResult actionResult =
                new SingleCategoryClassifyActionResult();
            final CustomSingleClassificationResult results = taskItem.getResults();
            if (results != null) {
                SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toSingleCategoryClassifyResultCollection(results));
            }
            // Fix: action name was dropped for custom tasks; set it like the built-in tasks above.
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            singleCategoryClassifyActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
        for (TasksStateTasksCustomMultiClassificationTasksItem taskItem : customMultiClassificationTasksItems) {
            final MultiCategoryClassifyActionResult actionResult =
                new MultiCategoryClassifyActionResult();
            final CustomMultiClassificationResult results = taskItem.getResults();
            if (results != null) {
                MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toMultiCategoryClassifyResultCollection(results));
            }
            // Fix: action name was dropped for custom tasks; set it like the built-in tasks above.
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            multiCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Route job-level errors onto the action each error's target (task array name + index) names.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final int taskIndex = Integer.parseInt(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
            } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = singleCategoryClassifyActionResults.get(taskIndex);
            } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = multiCategoryClassifyActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Assemble the aggregate result exposed to the user.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeCustomEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
        IterableStream.of(singleCategoryClassifyActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoriesResults(analyzeActionsResult,
        IterableStream.of(multiCategoryClassifyActionResults));
    return analyzeActionsResult;
}
/**
 * Translates a raw {@code AnalyzeJobState} polling response into the poller's
 * {@link PollResponse}, mapping the service job status onto a
 * {@link LongRunningOperationStatus} and copying job metadata (display name,
 * timestamps, task counters) onto the in-flight operation detail.
 *
 * @param analyzeJobStateResponse the latest status response from the service.
 * @param operationResultPollResponse the poller's current response whose value is updated in place.
 * @return a mono emitting the new poll response with the mapped status.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    // Default to completed; overwritten below whenever the service reports a status.
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
        switch (analyzeJobStateResponse.getValue().getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Forward compatibility: statuses added by newer service versions are
                // surfaced as-is; the boolean flags the state as terminal.
                status = LongRunningOperationStatus.fromString(
                    analyzeJobStateResponse.getValue().getStatus().toString(), true);
                break;
        }
    }
    // NOTE(review): getValue() is null-guarded above only for the status mapping; the
    // dereferences below assume a non-null response body — confirm the service always
    // returns one on a 200 status poll.
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getLastUpdateDateTime());
    // Copy the per-action progress counters for user-visible progress reporting.
    final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
        tasksResult.getFailed());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
        tasksResult.getInProgress());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
        operationResultPollResponse.getValue(), tasksResult.getCompleted());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
        tasksResult.getTotal());
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/**
 * Returns the given context, substituting {@link Context#NONE} when the caller
 * passed {@code null}.
 */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/**
 * Returns the given options, substituting a default {@link AnalyzeActionsOptions}
 * instance when the caller passed {@code null}.
 */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options != null) {
        return options;
    }
    return new AnalyzeActionsOptions();
}
/**
 * Extracts the task collection name and task index from an action error's
 * {@code target} reference (e.g. {@code "#/tasks/entityRecognitionTasks/0"}).
 *
 * @param targetReference the error target string returned by the service.
 * @return a two-element array: [0] task collection name, [1] task index as a string.
 * @throws RuntimeException if the target is absent or does not match the expected
 *     pattern. Previously an unmatched target returned {@code [null, null]}, which
 *     made the caller fail later with an opaque NPE at {@code Integer.valueOf(targetPair[1])}.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    while (matcher.find()) {
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // Fail fast with a descriptive message instead of letting the caller NPE on a
    // half-populated pair.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
// Wire-format task collection names, used both when building the job manifest and when
// resolving error target references of the form "#/tasks/<name>/<index>".
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
// Captures (1) the task collection name and (2) the task index from an error target.
// FIX: the String.format call here was truncated/corrupted in this copy of the file;
// reconstructed to cover every task name the error mapping in toAnalyzeActionsResult
// handles (including keyPhraseExtractionTasks, which was missing from the argument
// list). TODO(review): confirm against the upstream source.
private static final String REGEX_ACTION_ERROR_TARGET =
    String.format("#/tasks/(%s|%s|%s|%s|%s|%s|%s|%s|%s)/(\\d+)",
        ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, KEY_PHRASE_EXTRACTION_TASKS,
        ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS,
        CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
        CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
// Compiled once; Pattern instances are immutable and thread-safe.
private static final Pattern PATTERN;
static {
    PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
// Creates a helper client that issues all service calls through the supplied
// auto-generated implementation client.
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
    this.service = service;
}
/**
 * Starts the analyze-actions long-running operation.
 *
 * @param documents input documents; validated before the job is submitted.
 * @param actions the set of actions to run over the documents.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context additional pipeline context; {@code null} is replaced with {@code Context.NONE}.
 * @return a {@link PollerFlux} whose final result is a paged flux of {@link AnalyzeActionsResult};
 *     synchronous validation failures are surfaced via {@link PollerFlux#error}.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Assemble the service payload: the documents plus one task entry per configured action.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job, then pull the operation id out of the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // The service exposes no cancel API for this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Starts the analyze-actions long-running operation, producing a synchronous-friendly
 * {@link AnalyzeActionsResultPagedIterable} as the final result. Mirrors
 * {@code beginAnalyzeActions}, differing only in the fetch stage's result wrapper.
 *
 * @param documents input documents; validated before the job is submitted.
 * @param actions the set of actions to run over the documents.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context additional pipeline context; {@code null} is replaced with {@code Context.NONE}.
 * @return a {@link PollerFlux} whose final result is a paged iterable of results;
 *     synchronous validation failures are surfaced via {@link PollerFlux#error}.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Assemble the service payload: the documents plus one task entry per configured action.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job, then pull the operation id out of the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // The service exposes no cancel API for this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Builds the job manifest from the user's configured actions. Only action types the
 * caller actually configured are attached to the manifest; unset action lists are
 * left {@code null}.
 *
 * @param actions the user-facing action configuration, possibly {@code null}.
 * @return the manifest to submit, or {@code null} when no actions were given.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks manifest = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        manifest.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        manifest.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        manifest.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        manifest.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        manifest.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        manifest.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        manifest.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        manifest.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        manifest.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return manifest;
}
/**
 * Maps each {@link RecognizeEntitiesAction} onto the generated {@link EntitiesTask}
 * model. A {@code null} action is carried through as a {@code null} entry so list
 * indices stay aligned with the user's configuration.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizePiiEntitiesAction} onto the generated {@link PiiTask}
 * model, including the optional domain and category filters. A {@code null} action is
 * carried through as a {@code null} entry so list indices stay aligned.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> tasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new PiiTask()
                .setTaskName(action.getActionName())
                .setParameters(new PiiTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setDomain(PiiTaskParametersDomain.fromString(
                        action.getDomainFilter() == null ? null
                            : action.getDomainFilter().toString()))
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractKeyPhrasesAction} onto the generated {@link KeyPhrasesTask}
 * model. A {@code null} action is carried through as a {@code null} entry so list
 * indices stay aligned.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        tasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizeLinkedEntitiesAction} onto the generated
 * {@link EntityLinkingTask} model. A {@code null} action is carried through as a
 * {@code null} entry so list indices stay aligned.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> tasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntityLinkingTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntityLinkingTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link AnalyzeSentimentAction} onto the generated
 * {@link SentimentAnalysisTask} model. A {@code null} action is carried through as a
 * {@code null} entry so list indices stay aligned.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        tasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractSummaryAction} onto the generated
 * {@link ExtractiveSummarizationTask} model, including sentence count and sort order.
 * A {@code null} action is carried through as a {@code null} entry so list indices
 * stay aligned.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        tasks.add(action == null
            ? null
            : new ExtractiveSummarizationTask()
                .setTaskName(action.getActionName())
                .setParameters(new ExtractiveSummarizationTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setSentenceCount(action.getMaxSentenceCount())
                    .setSortBy(action.getOrderBy() == null ? null
                        : ExtractiveSummarizationTaskParametersSortBy.fromString(
                            action.getOrderBy().toString()))));
    }
    return tasks;
}
/**
 * Maps each {@link SingleCategoryClassifyAction} onto the generated
 * {@link CustomSingleClassificationTask} model (custom project + deployment names).
 * A {@code null} action is carried through as a {@code null} entry so list indices
 * stay aligned.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> tasks = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link MultiCategoryClassifyAction} onto the generated
 * {@link CustomMultiClassificationTask} model (custom project + deployment names).
 * A {@code null} action is carried through as a {@code null} entry so list indices
 * stay aligned.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> tasks = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomMultiClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomMultiClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Wraps the job-submission mono as the poller's activation function. The mono is built
 * eagerly by the caller; the actual network call happens only when the poller subscribes.
 * Service errors are mapped to the SDK's HTTP response exceptions.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return pollingContext -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Defensive: surface synchronous failures through the mono instead of throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's status-check function: reads the operation id from the latest
 * poll response, invokes the supplied status call, and maps the raw job state onto the
 * next {@link PollResponse} via {@code processAnalyzedModelResponse}.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
                pollingContext.getLatestResponse();
            final String operationId = operationResultPollResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Defensive: surface synchronous failures through the mono instead of throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's final-result function: fetches the paged flux of results for the
 * operation id found in the latest poll response.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's final-result function for the iterable variant: fetches the paged
 * iterable of results for the operation id found in the latest poll response.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds a paged flux over the analyze operation's status pages; each page pull
 * delegates to {@code getPage} with the continuation token supplied by the previous page.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches one page of analyze-operation results. When a continuation token is present,
 * the paging parameters are re-read from the token's next-link query parameters;
 * otherwise the caller-supplied values are used directly.
 *
 * @param continuationToken next-link token from the previous page, or {@code null} for the first page.
 * @param operationId identifier of the long-running analyze operation.
 * @param top maximum results per page, or {@code null} for the service default.
 * @param skip number of results to skip, or {@code null} for none.
 * @param showStats whether to include document statistics.
 * @param context additional pipeline context.
 * @return a mono emitting the mapped paged response; service errors are mapped to HTTP
 *     response exceptions.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: this lookup previously passed the boolean variable `showStats` as the
        // map key — getOrDefault(showStats, false) — so it always missed and statistics
        // were silently dropped on continuation pages. Use the named key, matching the
        // "$top"/"$skip" lookups above.
        // TODO(review): confirm "showStats" is the exact key produced by parseNextLink.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", false);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Wraps one raw {@code AnalyzeJobState} response as a single-element page of
 * {@link AnalyzeActionsResult}, propagating the service's next link as the
 * continuation token.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
/**
 * Flattens the service's {@code AnalyzeJobState} into a single {@link AnalyzeActionsResult}:
 * converts each per-task result list into its user-facing action-result type, then walks
 * the job-level errors and marks the referenced action results as errored.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    // Raw per-task result lists from the service; each may be null when the
    // corresponding action type was not requested.
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
        tasksStateTasks.getCustomEntityRecognitionTasks();
    final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
        tasksStateTasks.getCustomSingleClassificationTasks();
    final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
        tasksStateTasks.getCustomMultiClassificationTasks();
    // Per-action result accumulators; list position must mirror the task's position in the
    // service response, because error targets reference actions by index.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
    List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
        new ArrayList<>();
    List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
        new ArrayList<>();
    // Entity recognition results.
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            // A null results payload means the task has no documents result (e.g. it failed);
            // the error-mapping loop below fills in the error.
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    // PII entity recognition results.
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (int i = 0; i < piiTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    // Key-phrase extraction results.
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
            final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    // Linked entity recognition results.
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    // Sentiment analysis results.
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
            final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    // Extractive summarization results.
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
            final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                extractiveSummarizationTasksItems.get(i);
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    // Custom entity recognition results.
    if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
        for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
                customEntityRecognitionTasksItems.get(i);
            final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
            final CustomEntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeCustomEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeCustomEntitiesActionResults.add(actionResult);
        }
    }
    // Custom single-category classification results.
    if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
        for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
                customSingleClassificationTasksItems.get(i);
            final SingleCategoryClassifyActionResult actionResult =
                new SingleCategoryClassifyActionResult();
            final CustomSingleClassificationResult results = taskItem.getResults();
            if (results != null) {
                SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toSingleCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            singleCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Custom multi-category classification results.
    if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
        for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
                customMultiClassificationTasksItems.get(i);
            final MultiCategoryClassifyActionResult actionResult =
                new MultiCategoryClassifyActionResult();
            final CustomMultiClassificationResult results = taskItem.getResults();
            if (results != null) {
                MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toMultiCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            multiCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Job-level errors carry a target like "#/tasks/<taskName>/<index>"; flag the
    // referenced action result as errored instead of failing the whole conversion.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
            } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = singleCategoryClassifyActionResults.get(taskIndex);
            } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = multiCategoryClassifyActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Assemble the final user-facing result from the accumulated per-action lists.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeCustomEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
        IterableStream.of(singleCategoryClassifyActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoryResults(analyzeActionsResult,
        IterableStream.of(multiCategoryClassifyActionResults));
    return analyzeActionsResult;
}
/**
 * Translates a raw {@code AnalyzeJobState} polling response into the poller's
 * {@link PollResponse}, mapping the service job status onto a
 * {@link LongRunningOperationStatus} and copying job metadata (display name,
 * timestamps, task counters) onto the in-flight operation detail.
 *
 * @param analyzeJobStateResponse the latest status response from the service.
 * @param operationResultPollResponse the poller's current response whose value is updated in place.
 * @return a mono emitting the new poll response with the mapped status.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    // Default to completed; overwritten below whenever the service reports a status.
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
        switch (analyzeJobStateResponse.getValue().getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Forward compatibility: statuses added by newer service versions are
                // surfaced as-is; the boolean flags the state as terminal.
                status = LongRunningOperationStatus.fromString(
                    analyzeJobStateResponse.getValue().getStatus().toString(), true);
                break;
        }
    }
    // NOTE(review): getValue() is null-guarded above only for the status mapping; the
    // dereferences below assume a non-null response body — confirm the service always
    // returns one on a 200 status poll.
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getLastUpdateDateTime());
    // Copy the per-action progress counters for user-visible progress reporting.
    final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
        tasksResult.getFailed());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
        tasksResult.getInProgress());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
        operationResultPollResponse.getValue(), tasksResult.getCompleted());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
        tasksResult.getTotal());
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
private String[] parseActionErrorTarget(String targetReference) {
if (CoreUtils.isNullOrEmpty(targetReference)) {
throw logger.logExceptionAsError(new RuntimeException(
"Expected an error with a target field referencing an action but did not get one"));
}
final Matcher matcher = PATTERN.matcher(targetReference);
String[] taskNameIdPair = new String[2];
while (matcher.find()) {
taskNameIdPair[0] = matcher.group(1);
taskNameIdPair[1] = matcher.group(2);
}
return taskNameIdPair;
}
} |
In the other actions you set the ActionName on this actionResult type, like on L678. Should the same be done for the custom results? | private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
tasksStateTasks.getEntityRecognitionPiiTasks();
final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
tasksStateTasks.getEntityRecognitionTasks();
final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
tasksStateTasks.getKeyPhraseExtractionTasks();
final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
tasksStateTasks.getEntityLinkingTasks();
final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
tasksStateTasks.getSentimentAnalysisTasks();
final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
tasksStateTasks.getExtractiveSummarizationTasks();
final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
tasksStateTasks.getCustomEntityRecognitionTasks();
final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
tasksStateTasks.getCustomSingleClassificationTasks();
final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
tasksStateTasks.getCustomMultiClassificationTasks();
List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
new ArrayList<>();
List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
final EntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeEntitiesResultCollectionResponse(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
for (int i = 0; i < piiTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
final PiiResult results = taskItem.getResults();
if (results != null) {
RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizePiiEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizePiiEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
final KeyPhraseResult results = taskItem.getResults();
if (results != null) {
ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractKeyPhrasesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractKeyPhrasesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
final EntityLinkingResult results = taskItem.getResults();
if (results != null) {
RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeLinkedEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeLinkedEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
final SentimentResponse results = taskItem.getResults();
if (results != null) {
AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
toAnalyzeSentimentResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
analyzeSentimentActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
extractiveSummarizationTasksItems.get(i);
final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
final ExtractiveSummarizationResult results = taskItem.getResults();
if (results != null) {
ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractSummaryResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractSummaryActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
customEntityRecognitionTasksItems.get(i);
final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
final CustomEntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeCustomEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeCustomEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
customSingleClassificationTasksItems.get(i);
final SingleCategoryClassifyActionResult actionResult =
new SingleCategoryClassifyActionResult();
final CustomSingleClassificationResult results = taskItem.getResults();
if (results != null) {
SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
toSingleCategoryClassifyResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
singleCategoryClassifyActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
customMultiClassificationTasksItems.get(i);
final MultiCategoryClassifyActionResult actionResult =
new MultiCategoryClassifyActionResult();
final CustomMultiClassificationResult results = taskItem.getResults();
if (results != null) {
MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
toMultiCategoryClassifyResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
multiCategoryClassifyActionResults.add(actionResult);
}
}
final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
if (!CoreUtils.isNullOrEmpty(errors)) {
for (TextAnalyticsError error : errors) {
final String[] targetPair = parseActionErrorTarget(error.getTarget());
final String taskName = targetPair[0];
final Integer taskIndex = Integer.valueOf(targetPair[1]);
final TextAnalyticsActionResult actionResult;
if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeEntitiesActionResults.get(taskIndex);
} else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
} else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
actionResult = extractKeyPhrasesActionResults.get(taskIndex);
} else if (ENTITY_LINKING_TASKS.equals(taskName)) {
actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
} else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
actionResult = analyzeSentimentActionResults.get(taskIndex);
} else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
actionResult = extractSummaryActionResults.get(taskIndex);
} else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
} else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
actionResult = singleCategoryClassifyActionResults.get(taskIndex);
} else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
actionResult = multiCategoryClassifyActionResults.get(taskIndex);
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Invalid task name in target reference, " + taskName));
}
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
new com.azure.ai.textanalytics.models.TextAnalyticsError(
TextAnalyticsErrorCode.fromString(
error.getCode() == null ? null : error.getCode().toString()),
error.getMessage(), null));
}
}
final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizePiiEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
IterableStream.of(extractKeyPhrasesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeLinkedEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
IterableStream.of(analyzeSentimentActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
IterableStream.of(extractSummaryActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeCustomEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
IterableStream.of(singleCategoryClassifyActionResults));
AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoriesResults(analyzeActionsResult,
IterableStream.of(multiCategoryClassifyActionResults));
return analyzeActionsResult;
} | TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, | private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
        // Flatten the generated AnalyzeJobState into per-action user-facing result collections.
        // Each generated "tasks item" list corresponds to one action type; a null/empty list means
        // that action type was not part of the job.
        TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
        final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
            tasksStateTasks.getEntityRecognitionPiiTasks();
        final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
            tasksStateTasks.getEntityRecognitionTasks();
        final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
            tasksStateTasks.getKeyPhraseExtractionTasks();
        final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
            tasksStateTasks.getEntityLinkingTasks();
        final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
            tasksStateTasks.getSentimentAnalysisTasks();
        final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
            tasksStateTasks.getExtractiveSummarizationTasks();
        final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
            tasksStateTasks.getCustomEntityRecognitionTasks();
        final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
            tasksStateTasks.getCustomSingleClassificationTasks();
        final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
            tasksStateTasks.getCustomMultiClassificationTasks();
        // Accumulators kept in task-index order so error targets ("#/tasks/<name>/<index>") can be
        // resolved positionally in the error-handling loop below.
        List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
        List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
        List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
        List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
        List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
        List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
        List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
        List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
            new ArrayList<>();
        List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
            new ArrayList<>();
        // Entity recognition results: copy document results (when present), action name, and
        // completion time onto each user-facing action result.
        if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
            for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
                final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
                final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
                final EntitiesResult results = taskItem.getResults();
                if (results != null) {
                    RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizeEntitiesResultCollectionResponse(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizeEntitiesActionResults.add(actionResult);
            }
        }
        // PII entity recognition results.
        if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
            for (int i = 0; i < piiTasksItems.size(); i++) {
                final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
                final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
                final PiiResult results = taskItem.getResults();
                if (results != null) {
                    RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizePiiEntitiesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizePiiEntitiesActionResults.add(actionResult);
            }
        }
        // Key phrase extraction results.
        if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
            for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
                final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
                final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
                final KeyPhraseResult results = taskItem.getResults();
                if (results != null) {
                    ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toExtractKeyPhrasesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                extractKeyPhrasesActionResults.add(actionResult);
            }
        }
        // Linked entity recognition results.
        if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
            for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
                final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
                final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
                final EntityLinkingResult results = taskItem.getResults();
                if (results != null) {
                    RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizeLinkedEntitiesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizeLinkedEntitiesActionResults.add(actionResult);
            }
        }
        // Sentiment analysis results.
        if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
            for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
                final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
                final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
                final SentimentResponse results = taskItem.getResults();
                if (results != null) {
                    AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toAnalyzeSentimentResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                analyzeSentimentActionResults.add(actionResult);
            }
        }
        // Extractive summarization results.
        if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
            for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
                final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                    extractiveSummarizationTasksItems.get(i);
                final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
                final ExtractiveSummarizationResult results = taskItem.getResults();
                if (results != null) {
                    ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toExtractSummaryResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                extractSummaryActionResults.add(actionResult);
            }
        }
        // Custom entity recognition results (action name set here too, matching prebuilt actions).
        if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
            for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
                final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
                    customEntityRecognitionTasksItems.get(i);
                final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
                final CustomEntitiesResult results = taskItem.getResults();
                if (results != null) {
                    RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toRecognizeCustomEntitiesResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                recognizeCustomEntitiesActionResults.add(actionResult);
            }
        }
        // Custom single-category classification results.
        if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
            for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
                final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
                    customSingleClassificationTasksItems.get(i);
                final SingleCategoryClassifyActionResult actionResult =
                    new SingleCategoryClassifyActionResult();
                final CustomSingleClassificationResult results = taskItem.getResults();
                if (results != null) {
                    SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toSingleCategoryClassifyResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                singleCategoryClassifyActionResults.add(actionResult);
            }
        }
        // Custom multi-category classification results.
        if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
            for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
                final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
                    customMultiClassificationTasksItems.get(i);
                final MultiCategoryClassifyActionResult actionResult =
                    new MultiCategoryClassifyActionResult();
                final CustomMultiClassificationResult results = taskItem.getResults();
                if (results != null) {
                    MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                        toMultiCategoryClassifyResultCollection(results));
                }
                TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
                TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                    taskItem.getLastUpdateDateTime());
                multiCategoryClassifyActionResults.add(actionResult);
            }
        }
        // Attach job-level errors to the specific action result each error's target references;
        // the target is parsed into a (task name, task index) pair and resolved positionally.
        final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
        if (!CoreUtils.isNullOrEmpty(errors)) {
            for (TextAnalyticsError error : errors) {
                final String[] targetPair = parseActionErrorTarget(error.getTarget());
                final String taskName = targetPair[0];
                final Integer taskIndex = Integer.valueOf(targetPair[1]);
                final TextAnalyticsActionResult actionResult;
                if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                    actionResult = recognizeEntitiesActionResults.get(taskIndex);
                } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                    actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
                } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                    actionResult = extractKeyPhrasesActionResults.get(taskIndex);
                } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                    actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
                } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                    actionResult = analyzeSentimentActionResults.get(taskIndex);
                } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                    actionResult = extractSummaryActionResults.get(taskIndex);
                } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                    actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
                } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                    actionResult = singleCategoryClassifyActionResults.get(taskIndex);
                } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                    actionResult = multiCategoryClassifyActionResults.get(taskIndex);
                } else {
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Invalid task name in target reference, " + taskName));
                }
                TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
                TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                    new com.azure.ai.textanalytics.models.TextAnalyticsError(
                        TextAnalyticsErrorCode.fromString(
                            error.getCode() == null ? null : error.getCode().toString()),
                        error.getMessage(), null));
            }
        }
        // Assemble the aggregate result from the per-action collections.
        final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
        AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizeEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizePiiEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
            IterableStream.of(extractKeyPhrasesActionResults));
        AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizeLinkedEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
            IterableStream.of(analyzeSentimentActionResults));
        AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
            IterableStream.of(extractSummaryActionResults));
        AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
            IterableStream.of(recognizeCustomEntitiesActionResults));
        AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
            IterableStream.of(singleCategoryClassifyActionResults));
        AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoryResults(analyzeActionsResult,
            IterableStream.of(multiCategoryClassifyActionResults));
        return analyzeActionsResult;
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
    /**
     * Submits the given documents and actions as a long-running analyze job and returns a
     * {@link PollerFlux} that polls the job status and, once complete, fetches results as a
     * {@link AnalyzeActionsResultPagedFlux}.
     *
     * @param documents The documents to analyze; validated before submission.
     * @param options Optional settings; {@code null} is replaced with defaults.
     * @param context Optional pipeline context; {@code null} is replaced with {@link Context#NONE}.
     */
    PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
        Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
        Context context) {
        try {
            inputDocumentsValidation(documents);
            options = getNotNullAnalyzeActionsOptions(options);
            // Tag the context so requests are attributed to the Cognitive Services tracing namespace.
            final Context finalContext = getNotNullContext(context)
                .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
            final AnalyzeBatchInput analyzeBatchInput =
                new AnalyzeBatchInput()
                    .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                    .setTasks(getJobManifestTasks(actions));
            analyzeBatchInput.setDisplayName(actions.getDisplayName());
            final boolean finalIncludeStatistics = options.isIncludeStatistics();
            return new PollerFlux<>(
                DEFAULT_POLL_INTERVAL,
                // Activation: submit the job and extract the operation id from the
                // Operation-Location response header.
                activationOperation(
                    service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                        .map(analyzeResponse -> {
                            final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                                new AnalyzeActionsOperationDetail();
                            AnalyzeActionsOperationDetailPropertiesHelper
                                .setOperationId(textAnalyticsOperationResult,
                                    parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                            return textAnalyticsOperationResult;
                        })),
                // Polling: fetch the job status until it reaches a terminal state.
                pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                    finalIncludeStatistics, null, null, finalContext)),
                // Cancellation is not supported by this service operation.
                (activationResponse, pollingContext) ->
                    Mono.error(new RuntimeException("Cancellation is not supported.")),
                // Fetching: page through the results of the completed job.
                fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext)))
            );
        } catch (RuntimeException ex) {
            // Surface synchronous failures (e.g. validation) through the poller's error channel.
            return PollerFlux.error(ex);
        }
    }
    /**
     * Same long-running analyze-actions flow as {@code beginAnalyzeActions}, but the final result
     * is exposed as a blocking-friendly {@link AnalyzeActionsResultPagedIterable} for the
     * synchronous client.
     *
     * @param documents The documents to analyze; validated before submission.
     * @param options Optional settings; {@code null} is replaced with defaults.
     * @param context Optional pipeline context; {@code null} is replaced with {@link Context#NONE}.
     */
    PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
        Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
        Context context) {
        try {
            inputDocumentsValidation(documents);
            options = getNotNullAnalyzeActionsOptions(options);
            // Tag the context so requests are attributed to the Cognitive Services tracing namespace.
            final Context finalContext = getNotNullContext(context)
                .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
            final AnalyzeBatchInput analyzeBatchInput =
                new AnalyzeBatchInput()
                    .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                    .setTasks(getJobManifestTasks(actions));
            analyzeBatchInput.setDisplayName(actions.getDisplayName());
            final boolean finalIncludeStatistics = options.isIncludeStatistics();
            return new PollerFlux<>(
                DEFAULT_POLL_INTERVAL,
                // Activation: submit the job and extract the operation id from the
                // Operation-Location response header.
                activationOperation(
                    service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                        .map(analyzeResponse -> {
                            final AnalyzeActionsOperationDetail operationDetail =
                                new AnalyzeActionsOperationDetail();
                            AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                            return operationDetail;
                        })),
                // Polling: fetch the job status until it reaches a terminal state.
                pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                    finalIncludeStatistics, null, null, finalContext)),
                // Cancellation is not supported by this service operation.
                (activationResponse, pollingContext) ->
                    Mono.error(new RuntimeException("Cancellation is not supported.")),
                // Fetching: wrap the result paging in an iterable for synchronous consumption.
                fetchingOperationIterable(
                    operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                        operationId, null, null, finalIncludeStatistics, finalContext))))
            );
        } catch (RuntimeException ex) {
            // Surface synchronous failures (e.g. validation) through the poller's error channel.
            return PollerFlux.error(ex);
        }
    }
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
if (actions == null) {
return null;
}
final JobManifestTasks jobManifestTasks = new JobManifestTasks();
if (actions.getRecognizeEntitiesActions() != null) {
jobManifestTasks.setEntityRecognitionTasks(toEntitiesTask(actions));
}
if (actions.getRecognizePiiEntitiesActions() != null) {
jobManifestTasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
}
if (actions.getExtractKeyPhrasesActions() != null) {
jobManifestTasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
}
if (actions.getRecognizeLinkedEntitiesActions() != null) {
jobManifestTasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
}
if (actions.getAnalyzeSentimentActions() != null) {
jobManifestTasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
}
if (actions.getExtractSummaryActions() != null) {
jobManifestTasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
}
if (actions.getRecognizeCustomEntitiesActions() != null) {
jobManifestTasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
}
if (actions.getSingleCategoryClassifyActions() != null) {
jobManifestTasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
}
if (actions.getMultiCategoryClassifyActions() != null) {
jobManifestTasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
}
return jobManifestTasks;
}
/**
 * Maps each {@link RecognizeEntitiesAction} onto the service {@link EntitiesTask} model.
 * A {@code null} action is passed through as a {@code null} task so the request mirrors
 * exactly what the caller supplied — invalid input is surfaced, not silently filtered.
 *
 * @param actions the configured actions; {@code getRecognizeEntitiesActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizePiiEntitiesAction} onto the service {@link PiiTask} model.
 * {@code null} actions are carried through as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getRecognizePiiEntitiesActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> tasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new PiiTask()
                .setTaskName(action.getActionName())
                .setParameters(new PiiTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    // Domain filter is optional; a null filter maps to a null wire value.
                    .setDomain(PiiTaskParametersDomain.fromString(
                        action.getDomainFilter() == null ? null : action.getDomainFilter().toString()))
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractKeyPhrasesAction} onto the service {@link KeyPhrasesTask} model.
 * {@code null} actions are carried through as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getExtractKeyPhrasesActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        tasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizeLinkedEntitiesAction} onto the service {@link EntityLinkingTask} model.
 * {@code null} actions are carried through as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getRecognizeLinkedEntitiesActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> tasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntityLinkingTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntityLinkingTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link AnalyzeSentimentAction} onto the service {@link SentimentAnalysisTask} model.
 * {@code null} actions are carried through as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getAnalyzeSentimentActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        tasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractSummaryAction} onto the service {@link ExtractiveSummarizationTask}
 * model. {@code null} actions are carried through as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getExtractSummaryActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        tasks.add(action == null
            ? null
            : new ExtractiveSummarizationTask()
                .setTaskName(action.getActionName())
                .setParameters(new ExtractiveSummarizationTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setSentenceCount(action.getMaxSentenceCount())
                    // Order-by is optional; a null setting maps to a null wire value.
                    .setSortBy(action.getOrderBy() == null
                        ? null
                        : ExtractiveSummarizationTaskParametersSortBy.fromString(
                            action.getOrderBy().toString()))));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizeCustomEntitiesAction} onto the service {@link CustomEntitiesTask}
 * model. {@code null} actions are carried through as {@code null} tasks so the request reflects
 * exactly what the caller supplied; a broken input is surfaced rather than hidden.
 *
 * @param actions the configured actions; {@code getRecognizeCustomEntitiesActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
    final List<CustomEntitiesTask> tasks = new ArrayList<>();
    for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new CustomEntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomEntitiesTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link SingleCategoryClassifyAction} onto the service
 * {@link CustomSingleClassificationTask} model. {@code null} actions are carried through
 * as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getSingleCategoryClassifyActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> tasks = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link MultiCategoryClassifyAction} onto the service
 * {@link CustomMultiClassificationTask} model. {@code null} actions are carried through
 * as {@code null} tasks (no silent filtering).
 *
 * @param actions the configured actions; {@code getMultiCategoryClassifyActions()} must be non-null.
 * @return one task (or {@code null}) per action, in input order.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> tasks = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomMultiClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomMultiClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Builds the activation function for the analyze-actions long-running operation.
 * The returned function ignores its {@link PollingContext} and defers to the supplied
 * {@code operationResult} mono, which performs the initial analyze request and yields the
 * operation detail (including the operation id).
 *
 * @param operationResult mono that issues the initial service call.
 * @return activation function handed to the {@code PollerFlux} constructor.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return pollingContext -> {
        try {
            // Normalize service failures into HttpResponseException where applicable.
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Defer synchronous failures into the mono instead of throwing at the caller.
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the polling function for the analyze-actions long-running operation.
 * On every poll it reads the operation id from the latest poll response, invokes the
 * supplied status call, and converts the raw job state into a {@link PollResponse}.
 *
 * @param pollingFunction function that fetches the job state for a given operation id.
 * @return polling function handed to the {@code PollerFlux} constructor.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
                pollingContext.getLatestResponse();
            // The operation id was stored on the detail object during activation.
            final String operationId = operationResultPollResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Defer synchronous failures into the mono instead of throwing at the caller.
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the final-result fetching function for the analyze-actions poller: once the
 * operation completes, the operation id from the latest poll response is handed to
 * {@code fetchingFunction} to materialize the paged result flux.
 *
 * @param fetchingFunction maps an operation id to the final paged flux.
 * @return fetching function handed to the {@code PollerFlux} constructor.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            // Defer synchronous failures into the mono instead of throwing at the caller.
            return monoError(logger, ex);
        }
    };
}
/**
 * Iterable counterpart of {@code fetchingOperation}: resolves the completed operation's id
 * from the latest poll response and hands it to {@code fetchingFunction} to build the
 * synchronous paged iterable result.
 *
 * @param fetchingFunction maps an operation id to the final paged iterable.
 * @return fetching function handed to the {@code PollerFlux} constructor.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            // Defer synchronous failures into the mono instead of throwing at the caller.
            return monoError(logger, ex);
        }
    };
}
/**
 * Creates a paged flux over the results of an analyze-actions operation. Each page request
 * delegates to {@link #getPage(String, String, Integer, Integer, boolean, Context)} with the
 * subscriber's continuation token.
 *
 * @param operationId id of the service-side analyze operation.
 * @param top maximum page size forwarded to the first request; may be {@code null}.
 * @param skip offset forwarded to the first request; may be {@code null}.
 * @param showStats whether to request per-document statistics.
 * @param context pipeline context applied to every page call.
 * @return the lazily-evaluated paged flux.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches one page of analyze-actions results. When a continuation token (next link) is
 * present, paging parameters are parsed out of it and override the caller's values;
 * otherwise the caller's {@code top}/{@code skip}/{@code showStats} are used directly.
 *
 * @param continuationToken next-link from the previous page, or {@code null} for the first page.
 * @param operationId id of the service-side analyze operation.
 * @param top maximum page size; may be {@code null}.
 * @param skip offset; may be {@code null}.
 * @param showStats whether to request per-document statistics.
 * @param context pipeline context for the service call.
 * @return a mono emitting the page.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: the map is keyed by strings parsed from the next link ("$top", "$skip", ...),
        // but the previous code passed the boolean 'showStats' itself as the key, so the lookup
        // could never match and always yielded the default. Use the string key, and fall back to
        // the caller's own setting when the next link does not carry one.
        // NOTE(review): key assumed to follow the "$top"/"$skip" convention — confirm against
        // the keys parseNextLink actually emits.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("$showStats", showStats);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Wraps a raw {@link AnalyzeJobState} service response into a single-element paged response,
 * carrying the job's next link forward as the continuation token.
 *
 * @param response the raw service response.
 * @return a paged response whose single element is the converted {@link AnalyzeActionsResult}.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
/**
 * Converts a raw job-state response into a {@link PollResponse} for the poller: maps the
 * service job status onto {@link LongRunningOperationStatus} and copies the job metadata
 * (display name, timestamps, per-action counters) onto the existing operation detail.
 *
 * NOTE(review): {@code getValue()} is null-checked only for the status switch; the metadata
 * copies below dereference it unconditionally, and {@code getTasks()} is also assumed
 * non-null — confirm the service contract guarantees both for this endpoint.
 *
 * @param analyzeJobStateResponse the raw status response from the service.
 * @param operationResultPollResponse the latest poll response whose value is updated in place.
 * @return a mono emitting the updated poll response.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    // Default to completed; overwritten below whenever the service reports a status.
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
        switch (analyzeJobStateResponse.getValue().getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Unknown statuses are preserved verbatim as custom non-terminal states.
                status = LongRunningOperationStatus.fromString(
                    analyzeJobStateResponse.getValue().getStatus().toString(), true);
                break;
        }
    }
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getLastUpdateDateTime());
    // Per-action progress counters come from the nested tasks summary.
    final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
        tasksResult.getFailed());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
        tasksResult.getInProgress());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
        operationResultPollResponse.getValue(), tasksResult.getCompleted());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
        tasksResult.getTotal());
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/** Returns the given context, or {@link Context#NONE} when the caller passed {@code null}. */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/** Returns the given options, or a fresh default {@link AnalyzeActionsOptions} when {@code null}. */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options != null) {
        return options;
    }
    return new AnalyzeActionsOptions();
}
/**
 * Extracts the (task name, task index) pair from an error's target reference using the
 * class-level {@code PATTERN}. When the reference matches multiple times, the last match
 * wins, preserving the original scan-to-end behavior. Entries stay {@code null} when no
 * match is found.
 *
 * @param targetReference the error target string; must be non-empty.
 * @return a two-element array: group(1) and group(2) of the last match.
 * @throws RuntimeException when the reference is null or empty.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final String[] taskNameIdPair = new String[2];
    final Matcher matcher = PATTERN.matcher(targetReference);
    while (matcher.find()) {
        // Overwrite on every hit so the final pair reflects the last occurrence.
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
if (actions == null) {
return null;
}
final JobManifestTasks jobManifestTasks = new JobManifestTasks();
if (actions.getRecognizeEntitiesActions() != null) {
jobManifestTasks.setEntityRecognitionTasks(toEntitiesTask(actions));
}
if (actions.getRecognizePiiEntitiesActions() != null) {
jobManifestTasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
}
if (actions.getExtractKeyPhrasesActions() != null) {
jobManifestTasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
}
if (actions.getRecognizeLinkedEntitiesActions() != null) {
jobManifestTasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
}
if (actions.getAnalyzeSentimentActions() != null) {
jobManifestTasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
}
if (actions.getExtractSummaryActions() != null) {
jobManifestTasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
}
if (actions.getRecognizeCustomEntitiesActions() != null) {
jobManifestTasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
}
if (actions.getSingleCategoryClassifyActions() != null) {
jobManifestTasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
}
if (actions.getMultiCategoryClassifyActions() != null) {
jobManifestTasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
}
return jobManifestTasks;
}
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
final List<EntitiesTask> entitiesTasks = new ArrayList<>();
for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
if (action == null) {
entitiesTasks.add(null);
} else {
entitiesTasks.add(
new EntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return entitiesTasks;
}
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
final List<PiiTask> piiTasks = new ArrayList<>();
for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
if (action == null) {
piiTasks.add(null);
} else {
piiTasks.add(
new PiiTask()
.setTaskName(action.getActionName())
.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
}
}
return piiTasks;
}
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
final List<KeyPhrasesTask> keyPhrasesTasks = new ArrayList<>();
for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
if (action == null) {
keyPhrasesTasks.add(null);
} else {
keyPhrasesTasks.add(
new KeyPhrasesTask()
.setTaskName(action.getActionName())
.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return keyPhrasesTasks;
}
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
final List<EntityLinkingTask> entityLinkingTasks = new ArrayList<>();
for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
if (action == null) {
entityLinkingTasks.add(null);
} else {
entityLinkingTasks.add(
new EntityLinkingTask()
.setTaskName(action.getActionName())
.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return entityLinkingTasks;
}
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
final List<SentimentAnalysisTask> sentimentAnalysisTasks = new ArrayList<>();
for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
if (action == null) {
sentimentAnalysisTasks.add(null);
} else {
sentimentAnalysisTasks.add(
new SentimentAnalysisTask()
.setTaskName(action.getActionName())
.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return sentimentAnalysisTasks;
}
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
final List<ExtractiveSummarizationTask> extractiveSummarizationTasks = new ArrayList<>();
for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
if (action == null) {
extractiveSummarizationTasks.add(null);
} else {
extractiveSummarizationTasks.add(
new ExtractiveSummarizationTask()
.setTaskName(action.getActionName())
.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getOrderBy().toString()))));
}
}
return extractiveSummarizationTasks;
}
private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
}
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
final List<CustomSingleClassificationTask> customSingleClassificationTask = new ArrayList<>();
for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
if (action == null) {
customSingleClassificationTask.add(null);
} else {
customSingleClassificationTask.add(
new CustomSingleClassificationTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomSingleClassificationTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customSingleClassificationTask;
}
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
final List<CustomMultiClassificationTask> customMultiClassificationTask = new ArrayList<>();
for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
if (action == null) {
customMultiClassificationTask.add(null);
} else {
customMultiClassificationTask.add(
new CustomMultiClassificationTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomMultiClassificationTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customMultiClassificationTask;
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
Integer skip, boolean showStats, Context context) {
if (continuationToken != null) {
final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault(showStats, false);
return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} else {
return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
}
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
final AnalyzeJobState analyzeJobState = response.getValue();
return new PagedResponseBase<Void, AnalyzeActionsResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
Arrays.asList(toAnalyzeActionsResult(analyzeJobState)),
analyzeJobState.getNextLink(),
null);
}
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
Response<AnalyzeJobState> analyzeJobStateResponse,
PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
switch (analyzeJobStateResponse.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case CANCELLED:
status = LongRunningOperationStatus.USER_CANCELLED;
break;
default:
status = LongRunningOperationStatus.fromString(
analyzeJobStateResponse.getValue().getStatus().toString(), true);
break;
}
}
AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getDisplayName());
AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getCreatedDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getExpirationDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getLastUpdateDateTime());
final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
tasksResult.getFailed());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
tasksResult.getInProgress());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
operationResultPollResponse.getValue(), tasksResult.getCompleted());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
tasksResult.getTotal());
return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
/**
 * Parses an error's target reference into a {task name, task index} pair using the
 * class-level {@code PATTERN}; the last match wins when several are present.
 *
 * @param targetReference the error target string returned by the service.
 * @return a two-element array: element 0 is the task name, element 1 is the task index.
 * @throws RuntimeException if the target is absent or cannot be parsed.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    while (matcher.find()) {
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // FIX: previously an unmatched target returned {null, null}, which surfaced later as a
    // confusing NumberFormatException/NPE in the caller; fail fast with a clear message.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} |
For example, Input = [Action1, null, Action2, null]. We shouldn't be filtering the inputs. If you pass in something broken, you either raise an exception/fail the call on the client-side, or you include it in the request. Hiding it just hides a programming error. | private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
// Map each configured action 1:1 onto a service-side custom entities task.
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
    if (action == null) {
        // Deliberate: a null action is forwarded as-is rather than filtered out,
        // so a caller's programming error surfaces instead of being silently hidden.
        customEntitiesTasks.add(null);
    } else {
        customEntitiesTasks.add(
            new CustomEntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(
                    new CustomEntitiesTaskParameters()
                        .setProjectName(action.getProjectName())
                        .setDeploymentName(action.getDeploymentName())
                        // Offsets/lengths are reported in UTF-16 code units (Java's String unit).
                        .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                        .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
}
return customEntitiesTasks;
} | customEntitiesTasks.add(null); | private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
// Map each configured action 1:1 onto a service-side custom entities task.
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
    if (action == null) {
        // Deliberate: null actions are passed through, not filtered, so broken caller
        // input is visible in the request instead of being masked.
        customEntitiesTasks.add(null);
    } else {
        customEntitiesTasks.add(
            new CustomEntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(
                    new CustomEntitiesTaskParameters()
                        .setProjectName(action.getProjectName())
                        .setDeploymentName(action.getDeploymentName())
                        // Offsets/lengths are reported in UTF-16 code units.
                        .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                        .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
}
return customEntitiesTasks;
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
/*
 * Starts the long-running "analyze actions" operation. The returned PollerFlux submits
 * the job, polls its status, and on completion fetches the results as a paged flux.
 * Cancellation is not supported by the service for this operation.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        // Attribute outgoing requests to the Cognitive Services tracing namespace.
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id parsed from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            // Poll: re-query job status under the captured operation id.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: expose results as a lazily-paged flux.
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        // Synchronous validation/build failures are surfaced through the poller, not thrown.
        return PollerFlux.error(ex);
    }
}
/*
 * Synchronous-surface twin of beginAnalyzeActions: identical submit/poll wiring, but the
 * final fetch wraps the paged flux in an AnalyzeActionsResultPagedIterable.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        // Attribute outgoing requests to the Cognitive Services tracing namespace.
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/*
 * Builds the service-side JobManifestTasks from the user-facing TextAnalyticsActions.
 * Only action categories the caller actually configured are populated; a null actions
 * object yields a null manifest.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks jobManifestTasks = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        jobManifestTasks.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        jobManifestTasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        jobManifestTasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        jobManifestTasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        jobManifestTasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        jobManifestTasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        jobManifestTasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        jobManifestTasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        jobManifestTasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return jobManifestTasks;
}
/**
 * Maps each configured entity-recognition action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> entitiesTasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        entitiesTasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return entitiesTasks;
}
/**
 * Maps each configured PII-recognition action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> piiTasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        if (action == null) {
            piiTasks.add(null);
            continue;
        }
        final PiiTaskParametersDomain domain = PiiTaskParametersDomain.fromString(
            action.getDomainFilter() == null ? null : action.getDomainFilter().toString());
        piiTasks.add(new PiiTask()
            .setTaskName(action.getActionName())
            .setParameters(new PiiTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setDomain(domain)
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
    }
    return piiTasks;
}
/**
 * Maps each configured key-phrase action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> keyPhrasesTasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        keyPhrasesTasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return keyPhrasesTasks;
}
/**
 * Maps each configured linked-entity action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> entityLinkingTasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        entityLinkingTasks.add(action == null
            ? null
            : new EntityLinkingTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntityLinkingTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return entityLinkingTasks;
}
/**
 * Maps each configured sentiment action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> sentimentAnalysisTasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        sentimentAnalysisTasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return sentimentAnalysisTasks;
}
/**
 * Maps each configured extractive-summarization action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> extractiveSummarizationTasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        if (action == null) {
            extractiveSummarizationTasks.add(null);
            continue;
        }
        final ExtractiveSummarizationTaskParametersSortBy sortBy = action.getOrderBy() == null
            ? null
            : ExtractiveSummarizationTaskParametersSortBy.fromString(action.getOrderBy().toString());
        extractiveSummarizationTasks.add(new ExtractiveSummarizationTask()
            .setTaskName(action.getActionName())
            .setParameters(new ExtractiveSummarizationTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setSentenceCount(action.getMaxSentenceCount())
                .setSortBy(sortBy)));
    }
    return extractiveSummarizationTasks;
}
/**
 * Maps each configured single-category classification action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> customSingleClassificationTask = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        customSingleClassificationTask.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return customSingleClassificationTask;
}
/**
 * Maps each configured multi-category classification action 1:1 onto a service task.
 * Null actions are forwarded as null on purpose so caller errors are not hidden.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> customMultiClassificationTask = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        customMultiClassificationTask.add(action == null
            ? null
            : new CustomMultiClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomMultiClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return customMultiClassificationTask;
}
/*
 * Wraps the activation Mono so that service errors are mapped to HttpResponseException
 * before being surfaced through the poller.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return pollingContext -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Synchronous failures are funneled into the returned Mono instead of thrown.
            return monoError(logger, ex);
        }
    };
}
/*
 * Produces the poll function: reads the operation id from the latest poll response,
 * queries the job status endpoint, and converts the result into the next PollResponse.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
                pollingContext.getLatestResponse();
            final String operationId = operationResultPollResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Synchronous failures are funneled into the returned Mono instead of thrown.
            return monoError(logger, ex);
        }
    };
}
/*
 * Produces the final-result function: resolves the operation id from the last poll
 * response and delegates to the supplied fetch function to build the paged flux.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
            return fetchingFunction.apply(operationId);
        } catch (RuntimeException ex) {
            // Synchronous failures are funneled into the returned Mono instead of thrown.
            return monoError(logger, ex);
        }
    };
}
/*
 * Iterable twin of fetchingOperation: same operation-id resolution, but the fetch
 * function yields the synchronous paged-iterable wrapper instead of a flux.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
            return fetchingFunction.apply(operationId);
        } catch (RuntimeException ex) {
            // Synchronous failures are funneled into the returned Mono instead of thrown.
            return monoError(logger, ex);
        }
    };
}
/*
 * Creates a paged flux over the operation's results; every page fetch re-queries the
 * job status endpoint, forwarding the page's continuation token via getPage.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/*
 * Fetches one page of results. When a continuation token (the service nextLink) is
 * present, $top/$skip/showStats are recovered from the parsed token instead of the
 * original arguments.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // FIX: the boxed boolean 'showStats' itself was previously used as the map key;
        // a Boolean key can never equal a String key, so the lookup always fell back to
        // 'false'. Use the parameter's key name instead.
        // NOTE(review): confirm "showStats" matches the key emitted by parseNextLink
        // for the service's nextLink query parameters.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", false);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Wraps the single job-state payload as a one-element page, carrying the service's
 * nextLink forward as the continuation token.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
/*
 * Converts the raw AnalyzeJobState into the public AnalyzeActionsResult: one action
 * result per service task item, then job-level errors are mapped back onto the action
 * they target (via parseActionErrorTarget) before everything is bundled.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    // Raw per-category task lists as returned by the service; any of these may be null.
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
        tasksStateTasks.getCustomEntityRecognitionTasks();
    final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
        tasksStateTasks.getCustomSingleClassificationTasks();
    final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
        tasksStateTasks.getCustomMultiClassificationTasks();
    // Accumulators for the public per-action result lists; list order mirrors the
    // service's task order so error targets can be resolved by index below.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
    List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
        new ArrayList<>();
    List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
        new ArrayList<>();
    // Entity recognition results.
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    // PII entity recognition results.
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (int i = 0; i < piiTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    // Key phrase extraction results.
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
            final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    // Linked entity recognition results.
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    // Sentiment analysis results.
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
            final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    // Extractive summarization results.
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
            final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                extractiveSummarizationTasksItems.get(i);
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    // Custom entity recognition results.
    // NOTE(review): unlike the loops above, the three custom-task loops below never call
    // setActionName — confirm whether this omission is intentional or a gap.
    if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
        for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
                customEntityRecognitionTasksItems.get(i);
            final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
            final CustomEntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeCustomEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeCustomEntitiesActionResults.add(actionResult);
        }
    }
    // Custom single-category classification results.
    if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
        for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
                customSingleClassificationTasksItems.get(i);
            final SingleCategoryClassifyActionResult actionResult =
                new SingleCategoryClassifyActionResult();
            final CustomSingleClassificationResult results = taskItem.getResults();
            if (results != null) {
                SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toSingleCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            singleCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Custom multi-category classification results.
    if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
        for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
                customMultiClassificationTasksItems.get(i);
            final MultiCategoryClassifyActionResult actionResult =
                new MultiCategoryClassifyActionResult();
            final CustomMultiClassificationResult results = taskItem.getResults();
            if (results != null) {
                MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toMultiCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            multiCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Map each job-level error back onto the action result it targets; the target string
    // encodes the task category name and the index within that category's list.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
            } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = singleCategoryClassifyActionResults.get(taskIndex);
            } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = multiCategoryClassifyActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Bundle all per-category result lists into the public result object.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeCustomEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
        IterableStream.of(singleCategoryClassifyActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoriesResults(analyzeActionsResult,
        IterableStream.of(multiCategoryClassifyActionResults));
    return analyzeActionsResult;
}
/*
 * Maps a job-status response onto the next PollResponse: translates the service job
 * status into a LongRunningOperationStatus and copies job metadata (display name,
 * timestamps, per-action counts) onto the operation detail.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    final AnalyzeJobState jobState = analyzeJobStateResponse.getValue();
    if (jobState == null) {
        // FIX: the original null-checked getValue() when computing the status but then
        // dereferenced it unconditionally below, throwing NullPointerException on an
        // empty payload. With no payload there is no metadata to copy; keep the default
        // terminal status.
        return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
    }
    if (jobState.getStatus() != null) {
        switch (jobState.getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Unknown service states are surfaced verbatim; the 'true' flag marks
                // them as completed states.
                status = LongRunningOperationStatus.fromString(jobState.getStatus().toString(), true);
                break;
        }
    }
    final AnalyzeActionsOperationDetail operationDetail = operationResultPollResponse.getValue();
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationDetail,
        jobState.getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationDetail,
        jobState.getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationDetail,
        jobState.getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationDetail,
        jobState.getLastUpdateDateTime());
    final TasksStateTasks tasksResult = jobState.getTasks();
    if (tasksResult != null) {
        // FIX: guard against a missing tasks node instead of risking an NPE mid-poll.
        AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationDetail,
            tasksResult.getFailed());
        AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationDetail,
            tasksResult.getInProgress());
        AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(operationDetail,
            tasksResult.getCompleted());
        AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationDetail,
            tasksResult.getTotal());
    }
    return Mono.just(new PollResponse<>(status, operationDetail));
}
/**
 * Normalizes a possibly-null caller-supplied {@link Context} to a usable instance.
 *
 * @param context the context passed by the caller, may be {@code null}.
 * @return the given context, or {@link Context#NONE} when {@code null}.
 */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/**
 * Normalizes a possibly-null {@link AnalyzeActionsOptions} to a non-null instance with defaults.
 *
 * @param options the options passed by the caller, may be {@code null}.
 * @return the given options, or a freshly constructed default instance when {@code null}.
 */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options != null) {
        return options;
    }
    return new AnalyzeActionsOptions();
}
/**
 * Parses a service error {@code target} reference (of the form matched by {@code PATTERN}) into a
 * {taskName, taskIndex} pair.
 *
 * @param targetReference the error target string returned by the service.
 * @return a two-element array: index 0 is the task name, index 1 is the task index (as a string).
 * @throws RuntimeException (logged) if the target is missing or does not match the expected pattern.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    boolean matched = false;
    // Keep the last match, as before; a single target string is expected to match once.
    while (matcher.find()) {
        matched = true;
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // BUG FIX: previously an unmatched target returned {null, null}, which made callers fail later
    // with an opaque NullPointerException (e.g. Integer.valueOf(null)). Fail fast instead.
    if (!matched) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
// Task-collection names as they appear in service error "target" references and in the
// tasks-state payload (see parseActionErrorTarget and toAnalyzeActionsResult).
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";

// Matches an error target of the form "#/tasks/<taskName>/<index>"; group(1) is the task name and
// group(2) is the numeric task index, as consumed by parseActionErrorTarget.
// NOTE(review): the original format string was truncated in this copy of the file; it has been
// reconstructed from the visible argument list and the two capture groups parseActionErrorTarget
// reads. KEY_PHRASE_EXTRACTION_TASKS is included so key-phrase action errors are recognized by the
// error-mapping switch in toAnalyzeActionsResult — confirm against the service error format.
private static final String REGEX_ACTION_ERROR_TARGET = String.format(
    "#/tasks/(%s|%s|%s|%s|%s|%s|%s|%s|%s)/(\\d+)",
    KEY_PHRASE_EXTRACTION_TASKS,
    ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
    EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
    CUSTOM_MULTI_CLASSIFICATION_TASKS);

private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;

// Compiled once and reused; Pattern is thread-safe.
private static final Pattern PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);

AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
    this.service = service;
}
/**
 * Starts a long-running analyze-actions operation over the given documents and returns a
 * {@link PollerFlux} whose final result is a paged flux of action results.
 *
 * @param documents the documents to analyze; validated before the request is built.
 * @param actions the set of actions to run; translated to the service task manifest.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context additional request context, may be {@code null}.
 * @return a poller over the operation; activation errors surface via {@link PollerFlux#error}.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        // Captured as effectively-final for use inside the polling/fetching lambdas below.
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and extract the operation id from the operation-location header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            // Poll: re-query the job status until a terminal state is reached.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported by this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: expose the results as a paged flux keyed by the operation id.
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Synchronous-surface variant of {@code beginAnalyzeActions}: identical activation and polling,
 * but the final result is wrapped in an {@link AnalyzeActionsResultPagedIterable}.
 *
 * @param documents the documents to analyze; validated before the request is built.
 * @param actions the set of actions to run; translated to the service task manifest.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context additional request context, may be {@code null}.
 * @return a poller over the operation; activation errors surface via {@link PollerFlux#error}.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        // Captured as effectively-final for use inside the polling/fetching lambdas below.
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and extract the operation id from the operation-location header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            // Poll: re-query the job status until a terminal state is reached.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported by this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: expose the results as a paged iterable keyed by the operation id.
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Translates the user-facing {@link TextAnalyticsActions} into the generated service task
 * manifest. Each task family is only populated when the corresponding action list was supplied.
 *
 * @param actions the user-supplied actions; may be {@code null}.
 * @return the populated manifest, or {@code null} when {@code actions} is {@code null}.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks manifest = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        manifest.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        manifest.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        manifest.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        manifest.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        manifest.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        manifest.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        manifest.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        manifest.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        manifest.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return manifest;
}
/**
 * Maps each {@link RecognizeEntitiesAction} to its service {@link EntitiesTask}, preserving the
 * input order and keeping {@code null} entries as {@code null}.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizePiiEntitiesAction} to its service {@link PiiTask}, preserving the
 * input order and keeping {@code null} entries as {@code null}.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> tasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new PiiTask()
                .setTaskName(action.getActionName())
                .setParameters(new PiiTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setDomain(PiiTaskParametersDomain.fromString(
                        action.getDomainFilter() == null ? null : action.getDomainFilter().toString()))
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractKeyPhrasesAction} to its service {@link KeyPhrasesTask}, preserving the
 * input order and keeping {@code null} entries as {@code null}.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        tasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link RecognizeLinkedEntitiesAction} to its service {@link EntityLinkingTask},
 * preserving the input order and keeping {@code null} entries as {@code null}.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> tasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntityLinkingTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntityLinkingTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link AnalyzeSentimentAction} to its service {@link SentimentAnalysisTask},
 * preserving the input order and keeping {@code null} entries as {@code null}.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        tasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps each {@link ExtractSummaryAction} to its service {@link ExtractiveSummarizationTask},
 * preserving the input order and keeping {@code null} entries as {@code null}.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        tasks.add(action == null
            ? null
            : new ExtractiveSummarizationTask()
                .setTaskName(action.getActionName())
                .setParameters(new ExtractiveSummarizationTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setSentenceCount(action.getMaxSentenceCount())
                    .setSortBy(action.getOrderBy() == null
                        ? null
                        : ExtractiveSummarizationTaskParametersSortBy.fromString(
                            action.getOrderBy().toString()))));
    }
    return tasks;
}
/**
 * Maps each {@link SingleCategoryClassifyAction} to its service
 * {@link CustomSingleClassificationTask}, preserving the input order and keeping {@code null}
 * entries as {@code null}.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> tasks = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps each {@link MultiCategoryClassifyAction} to its service
 * {@link CustomMultiClassificationTask}, preserving the input order and keeping {@code null}
 * entries as {@code null}.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> tasks = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomMultiClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomMultiClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Wraps the pre-built activation {@link Mono} as the activation function of the poller, mapping
 * service failures to {@code HttpResponseException} where applicable.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return ignoredContext -> {
        try {
            final Mono<AnalyzeActionsOperationDetail> activation =
                operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
            return activation;
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's polling function: queries the job status by operation id and converts the
 * service state into a {@link PollResponse} via {@code processAnalyzedModelResponse}.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> lastResponse = pollingContext.getLatestResponse();
            return pollingFunction.apply(lastResponse.getValue().getOperationId())
                .flatMap(jobStateResponse -> processAnalyzedModelResponse(jobStateResponse, lastResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's final-result function: resolves the paged flux of action results for the
 * completed operation id.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Iterable-surface counterpart of {@code fetchingOperation}: resolves the paged iterable of
 * action results for the completed operation id.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds a paged flux over the analyze-actions operation's results; each page request re-queries
 * the operation status endpoint through {@link #getPage}.
 *
 * Note: the {@code pageSize} supplied by the page retriever is accepted but not forwarded — page
 * sizing is driven by the $top/$skip values carried in the continuation token.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches a single page of analyze-actions results. When a continuation token is present, the
 * paging values encoded in the token ($top, $skip, showStats) take precedence over the method
 * arguments; otherwise the arguments are used directly.
 *
 * @param continuationToken the next-link token from the previous page, or {@code null} for the first page.
 * @param operationId the analyze operation id.
 * @param top maximum items per page, may be {@code null}.
 * @param skip number of items to skip, may be {@code null}.
 * @param showStats whether to include statistics in the response.
 * @param context additional request context.
 * @return the page of results, with service errors mapped to {@code HttpResponseException}.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: the boolean argument 'showStats' (autoboxed to Boolean) was previously used as
        // the map KEY, which can never equal a String key, so the token's value was always ignored
        // and 'false' returned. Look up the string key instead.
        // NOTE(review): key name "showStats" assumed to match parseNextLink output — confirm.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", false);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Wraps one service {@link AnalyzeJobState} response as a single-element paged response, carrying
 * the service's next-link forward as the continuation token.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
/**
 * Converts one service {@link AnalyzeJobState} snapshot into the public
 * {@link AnalyzeActionsResult}: each task family's items are mapped to their action-result type
 * (in service order), then job-level errors are attached to the action they reference.
 *
 * NOTE(review): assumes {@code analyzeJobState.getTasks()} is non-null — confirm against the
 * service contract; a null tasks payload would NPE on the getters below.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    // Raw per-family task item lists from the service payload; any of these may be null/empty.
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
        tasksStateTasks.getCustomEntityRecognitionTasks();
    final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
        tasksStateTasks.getCustomSingleClassificationTasks();
    final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
        tasksStateTasks.getCustomMultiClassificationTasks();
    // Accumulators for the mapped action results; indices must line up with the service lists so
    // that error targets (task index) resolve to the right entry below.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
    List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
        new ArrayList<>();
    List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
        new ArrayList<>();
    // Entity recognition results; a null 'results' means the task produced no documents (e.g. failed).
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    // PII entity recognition results.
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (int i = 0; i < piiTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    // Key phrase extraction results.
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
            final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    // Linked entity recognition results.
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    // Sentiment analysis results.
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
            final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    // Extractive summarization results.
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
            final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                extractiveSummarizationTasksItems.get(i);
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    // Custom entity recognition results.
    if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
        for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
                customEntityRecognitionTasksItems.get(i);
            final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
            final CustomEntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeCustomEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeCustomEntitiesActionResults.add(actionResult);
        }
    }
    // Custom single-category classification results.
    if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
        for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
                customSingleClassificationTasksItems.get(i);
            final SingleCategoryClassifyActionResult actionResult =
                new SingleCategoryClassifyActionResult();
            final CustomSingleClassificationResult results = taskItem.getResults();
            if (results != null) {
                SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toSingleCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            singleCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Custom multi-category classification results.
    if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
        for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
                customMultiClassificationTasksItems.get(i);
            final MultiCategoryClassifyActionResult actionResult =
                new MultiCategoryClassifyActionResult();
            final CustomMultiClassificationResult results = taskItem.getResults();
            if (results != null) {
                MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toMultiCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            multiCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Attach job-level errors to the action they reference: each error's target names a task
    // family and index (parsed by parseActionErrorTarget), which selects the entry to mark failed.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
            } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = singleCategoryClassifyActionResults.get(taskIndex);
            } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = multiCategoryClassifyActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Assemble the public result from the per-family accumulators.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeCustomEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
        IterableStream.of(singleCategoryClassifyActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoryResults(analyzeActionsResult,
        IterableStream.of(multiCategoryClassifyActionResults));
    return analyzeActionsResult;
}
/**
 * Maps a polled {@link AnalyzeJobState} response onto a poller {@link PollResponse}: translates
 * the service job status into a {@link LongRunningOperationStatus} and copies operation metadata
 * (display name, timestamps, action counters) onto the current operation detail.
 *
 * @param analyzeJobStateResponse the raw status response from the service.
 * @param operationResultPollResponse the latest poll response carrying the operation detail to update.
 * @return a new poll response with the translated status and the updated operation detail.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    final AnalyzeJobState jobState = analyzeJobStateResponse.getValue();
    if (jobState != null && jobState.getStatus() != null) {
        switch (jobState.getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Unrecognized service states are surfaced verbatim, flagged as terminal.
                status = LongRunningOperationStatus.fromString(jobState.getStatus().toString(), true);
                break;
        }
    }
    // BUG FIX: the response value was previously dereferenced unconditionally below even though
    // the status handling above treats it as possibly null; guard metadata propagation the same
    // way, and also guard getTasks(), which was dereferenced without a null check.
    if (jobState != null) {
        final AnalyzeActionsOperationDetail operationDetail = operationResultPollResponse.getValue();
        AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationDetail,
            jobState.getDisplayName());
        AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationDetail,
            jobState.getCreatedDateTime());
        AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationDetail,
            jobState.getExpirationDateTime());
        AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationDetail,
            jobState.getLastUpdateDateTime());
        final TasksStateTasks tasksResult = jobState.getTasks();
        if (tasksResult != null) {
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationDetail,
                tasksResult.getFailed());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationDetail,
                tasksResult.getInProgress());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(operationDetail,
                tasksResult.getCompleted());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationDetail,
                tasksResult.getTotal());
        }
    }
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/**
 * Normalizes a possibly-null caller-supplied {@link Context} to a usable instance.
 *
 * @param context the context passed by the caller, may be {@code null}.
 * @return the given context, or {@link Context#NONE} when {@code null}.
 */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/**
 * Normalizes a possibly-null {@link AnalyzeActionsOptions} to a non-null instance with defaults.
 *
 * @param options the options passed by the caller, may be {@code null}.
 * @return the given options, or a freshly constructed default instance when {@code null}.
 */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options != null) {
        return options;
    }
    return new AnalyzeActionsOptions();
}
/**
 * Parses the task name and task index out of an error {@code target} reference returned by the
 * service (matched against the class-level {@code PATTERN}).
 *
 * <p>Fix: the original returned {@code [null, null]} when the reference did not match
 * {@code PATTERN}, which made the caller fail later with an opaque
 * {@link NumberFormatException} on {@code Integer.valueOf(null)}. An unmatched reference now
 * fails fast with a descriptive error instead.</p>
 *
 * @param targetReference the error target reference, e.g. {@code "#/tasks/<taskName>/<index>"}.
 * @return a two-element array: {@code [0]} the task name, {@code [1]} the task index (both
 *     non-null).
 * @throws RuntimeException if {@code targetReference} is null/empty or does not match the
 *     expected task-reference format.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    // If the reference somehow contains multiple matches, the last one wins (original behavior).
    while (matcher.find()) {
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // Fail fast with a clear message instead of letting the caller hit Integer.valueOf(null).
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from the error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} |
Good catch! I will update it and the corresponding tests. | private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
tasksStateTasks.getEntityRecognitionPiiTasks();
final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
tasksStateTasks.getEntityRecognitionTasks();
final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
tasksStateTasks.getKeyPhraseExtractionTasks();
final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
tasksStateTasks.getEntityLinkingTasks();
final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
tasksStateTasks.getSentimentAnalysisTasks();
final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
tasksStateTasks.getExtractiveSummarizationTasks();
final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
tasksStateTasks.getCustomEntityRecognitionTasks();
final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
tasksStateTasks.getCustomSingleClassificationTasks();
final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
tasksStateTasks.getCustomMultiClassificationTasks();
List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
new ArrayList<>();
List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
final EntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeEntitiesResultCollectionResponse(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
for (int i = 0; i < piiTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
final PiiResult results = taskItem.getResults();
if (results != null) {
RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizePiiEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizePiiEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
final KeyPhraseResult results = taskItem.getResults();
if (results != null) {
ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractKeyPhrasesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractKeyPhrasesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
final EntityLinkingResult results = taskItem.getResults();
if (results != null) {
RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeLinkedEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeLinkedEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
final SentimentResponse results = taskItem.getResults();
if (results != null) {
AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
toAnalyzeSentimentResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
analyzeSentimentActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
extractiveSummarizationTasksItems.get(i);
final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
final ExtractiveSummarizationResult results = taskItem.getResults();
if (results != null) {
ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractSummaryResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractSummaryActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
customEntityRecognitionTasksItems.get(i);
final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
final CustomEntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeCustomEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeCustomEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
customSingleClassificationTasksItems.get(i);
final SingleCategoryClassifyActionResult actionResult =
new SingleCategoryClassifyActionResult();
final CustomSingleClassificationResult results = taskItem.getResults();
if (results != null) {
SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
toSingleCategoryClassifyResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
singleCategoryClassifyActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
customMultiClassificationTasksItems.get(i);
final MultiCategoryClassifyActionResult actionResult =
new MultiCategoryClassifyActionResult();
final CustomMultiClassificationResult results = taskItem.getResults();
if (results != null) {
MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
toMultiCategoryClassifyResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
multiCategoryClassifyActionResults.add(actionResult);
}
}
final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
if (!CoreUtils.isNullOrEmpty(errors)) {
for (TextAnalyticsError error : errors) {
final String[] targetPair = parseActionErrorTarget(error.getTarget());
final String taskName = targetPair[0];
final Integer taskIndex = Integer.valueOf(targetPair[1]);
final TextAnalyticsActionResult actionResult;
if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeEntitiesActionResults.get(taskIndex);
} else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
} else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
actionResult = extractKeyPhrasesActionResults.get(taskIndex);
} else if (ENTITY_LINKING_TASKS.equals(taskName)) {
actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
} else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
actionResult = analyzeSentimentActionResults.get(taskIndex);
} else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
actionResult = extractSummaryActionResults.get(taskIndex);
} else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
} else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
actionResult = singleCategoryClassifyActionResults.get(taskIndex);
} else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
actionResult = multiCategoryClassifyActionResults.get(taskIndex);
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Invalid task name in target reference, " + taskName));
}
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
new com.azure.ai.textanalytics.models.TextAnalyticsError(
TextAnalyticsErrorCode.fromString(
error.getCode() == null ? null : error.getCode().toString()),
error.getMessage(), null));
}
}
final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizePiiEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
IterableStream.of(extractKeyPhrasesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeLinkedEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
IterableStream.of(analyzeSentimentActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
IterableStream.of(extractSummaryActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeCustomEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
IterableStream.of(singleCategoryClassifyActionResults));
AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoriesResults(analyzeActionsResult,
IterableStream.of(multiCategoryClassifyActionResults));
return analyzeActionsResult;
} | TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, | private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
tasksStateTasks.getEntityRecognitionPiiTasks();
final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
tasksStateTasks.getEntityRecognitionTasks();
final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
tasksStateTasks.getKeyPhraseExtractionTasks();
final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
tasksStateTasks.getEntityLinkingTasks();
final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
tasksStateTasks.getSentimentAnalysisTasks();
final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
tasksStateTasks.getExtractiveSummarizationTasks();
final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
tasksStateTasks.getCustomEntityRecognitionTasks();
final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
tasksStateTasks.getCustomSingleClassificationTasks();
final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
tasksStateTasks.getCustomMultiClassificationTasks();
List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
new ArrayList<>();
List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
new ArrayList<>();
if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
final EntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeEntitiesResultCollectionResponse(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
for (int i = 0; i < piiTasksItems.size(); i++) {
final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
final PiiResult results = taskItem.getResults();
if (results != null) {
RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizePiiEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizePiiEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
final KeyPhraseResult results = taskItem.getResults();
if (results != null) {
ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractKeyPhrasesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractKeyPhrasesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
final EntityLinkingResult results = taskItem.getResults();
if (results != null) {
RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeLinkedEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeLinkedEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
final SentimentResponse results = taskItem.getResults();
if (results != null) {
AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
toAnalyzeSentimentResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
analyzeSentimentActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
extractiveSummarizationTasksItems.get(i);
final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
final ExtractiveSummarizationResult results = taskItem.getResults();
if (results != null) {
ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
toExtractSummaryResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
extractSummaryActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
customEntityRecognitionTasksItems.get(i);
final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
final CustomEntitiesResult results = taskItem.getResults();
if (results != null) {
RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
toRecognizeCustomEntitiesResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
recognizeCustomEntitiesActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
customSingleClassificationTasksItems.get(i);
final SingleCategoryClassifyActionResult actionResult =
new SingleCategoryClassifyActionResult();
final CustomSingleClassificationResult results = taskItem.getResults();
if (results != null) {
SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
toSingleCategoryClassifyResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
singleCategoryClassifyActionResults.add(actionResult);
}
}
if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
customMultiClassificationTasksItems.get(i);
final MultiCategoryClassifyActionResult actionResult =
new MultiCategoryClassifyActionResult();
final CustomMultiClassificationResult results = taskItem.getResults();
if (results != null) {
MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
toMultiCategoryClassifyResultCollection(results));
}
TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
taskItem.getLastUpdateDateTime());
multiCategoryClassifyActionResults.add(actionResult);
}
}
final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
if (!CoreUtils.isNullOrEmpty(errors)) {
for (TextAnalyticsError error : errors) {
final String[] targetPair = parseActionErrorTarget(error.getTarget());
final String taskName = targetPair[0];
final Integer taskIndex = Integer.valueOf(targetPair[1]);
final TextAnalyticsActionResult actionResult;
if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeEntitiesActionResults.get(taskIndex);
} else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
} else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
actionResult = extractKeyPhrasesActionResults.get(taskIndex);
} else if (ENTITY_LINKING_TASKS.equals(taskName)) {
actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
} else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
actionResult = analyzeSentimentActionResults.get(taskIndex);
} else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
actionResult = extractSummaryActionResults.get(taskIndex);
} else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
} else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
actionResult = singleCategoryClassifyActionResults.get(taskIndex);
} else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
actionResult = multiCategoryClassifyActionResults.get(taskIndex);
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Invalid task name in target reference, " + taskName));
}
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
new com.azure.ai.textanalytics.models.TextAnalyticsError(
TextAnalyticsErrorCode.fromString(
error.getCode() == null ? null : error.getCode().toString()),
error.getMessage(), null));
}
}
final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizePiiEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
IterableStream.of(extractKeyPhrasesActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeLinkedEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
IterableStream.of(analyzeSentimentActionResults));
AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
IterableStream.of(extractSummaryActionResults));
AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
IterableStream.of(recognizeCustomEntitiesActionResults));
AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
IterableStream.of(singleCategoryClassifyActionResults));
AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoryResults(analyzeActionsResult,
IterableStream.of(multiCategoryClassifyActionResults));
return analyzeActionsResult;
} | class AnalyzeActionsAsyncClient {
// Service-side task discriminator names as they appear in the "target" of returned errors
// (e.g. "#/tasks/entityRecognitionTasks/0").
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
// Matches an error target such as "#/tasks/<taskName>/<index>", capturing the task name
// (group 1) and the zero-based task index (group 2).
// NOTE(review): the format string was corrupted in this copy of the file (it ended at
// 'String.format("'); reconstructed from the visible argument tail with the missing first
// argument assumed to be KEY_PHRASE_EXTRACTION_TASKS -- confirm against upstream before merging.
private static final String REGEX_ACTION_ERROR_TARGET =
    String.format("#/tasks/(%s|%s|%s|%s|%s|%s|%s|%s|%s)/(\\d+)", KEY_PHRASE_EXTRACTION_TASKS,
        ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
        EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
        CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
// Compiled once and shared; Pattern is immutable and thread-safe. Direct initialization replaces
// the original static block, which contained no additional logic.
private static final Pattern PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);

/**
 * Creates an {@code AnalyzeActionsAsyncClient} that delegates service calls to the given
 * auto-generated implementation client.
 *
 * @param service the generated Text Analytics implementation client.
 */
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
    this.service = service;
}
/**
 * Submits the given documents and actions as a single "analyze" long-running operation and
 * returns a {@link PollerFlux} that tracks it; the final result is a paged flux of
 * {@code AnalyzeActionsResult} pages.
 *
 * @param documents the documents to analyze; validated up front by
 *     {@code inputDocumentsValidation}.
 * @param actions the set of actions to run; also supplies the operation display name.
 *     NOTE(review): {@code getJobManifestTasks} tolerates a null {@code actions}, but
 *     {@code actions.getDisplayName()} below would NPE -- confirm callers guarantee non-null.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context optional pipeline context; {@code null} is replaced with {@code Context.NONE}.
 * @return a {@link PollerFlux} for the operation; synchronous setup failures are surfaced as
 *     {@link PollerFlux#error(Throwable)} rather than thrown.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        // Tag the context so pipeline policies attribute telemetry to the Cognitive namespace.
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Build the service payload: documents plus the per-action task manifest.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        // Captured in a final local so the lambdas below can use it.
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and extract the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            // Poll: query the job status by operation id.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported by this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch final result: a paged flux over the completed operation's result pages.
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        // Surface synchronous setup failures through the poller instead of throwing.
        return PollerFlux.error(ex);
    }
}
/**
 * Iterable-result twin of {@code beginAnalyzeActions}: submits the same "analyze" long-running
 * operation but yields the final result as an {@code AnalyzeActionsResultPagedIterable}
 * (blocking iteration) instead of a paged flux.
 *
 * @param documents the documents to analyze; validated up front by
 *     {@code inputDocumentsValidation}.
 * @param actions the set of actions to run; also supplies the operation display name.
 *     NOTE(review): same potential NPE on {@code actions.getDisplayName()} for a null
 *     {@code actions} as in {@code beginAnalyzeActions} -- confirm callers guarantee non-null.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context optional pipeline context; {@code null} is replaced with {@code Context.NONE}.
 * @return a {@link PollerFlux} for the operation; synchronous setup failures are surfaced as
 *     {@link PollerFlux#error(Throwable)} rather than thrown.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        // Tag the context so pipeline policies attribute telemetry to the Cognitive namespace.
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Build the service payload: documents plus the per-action task manifest.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        // Captured in a final local so the lambdas below can use it.
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and extract the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            // Poll: query the job status by operation id.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported by this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch final result: wrap the paged flux in a blocking iterable.
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        // Surface synchronous setup failures through the poller instead of throwing.
        return PollerFlux.error(ex);
    }
}
/**
 * Builds the service-side {@link JobManifestTasks} manifest from the user-facing
 * {@link TextAnalyticsActions}. Each task list is populated only when the corresponding action
 * collection is present on {@code actions}.
 *
 * @param actions the user-configured actions; may be {@code null}.
 * @return the task manifest, or {@code null} when {@code actions} is {@code null}.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks tasks = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        tasks.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        tasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        tasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        tasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        tasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        tasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        tasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        tasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        tasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return tasks;
}
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
final List<EntitiesTask> entitiesTasks = new ArrayList<>();
for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
if (action == null) {
entitiesTasks.add(null);
} else {
entitiesTasks.add(
new EntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return entitiesTasks;
}
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
final List<PiiTask> piiTasks = new ArrayList<>();
for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
if (action == null) {
piiTasks.add(null);
} else {
piiTasks.add(
new PiiTask()
.setTaskName(action.getActionName())
.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
}
}
return piiTasks;
}
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
final List<KeyPhrasesTask> keyPhrasesTasks = new ArrayList<>();
for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
if (action == null) {
keyPhrasesTasks.add(null);
} else {
keyPhrasesTasks.add(
new KeyPhrasesTask()
.setTaskName(action.getActionName())
.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return keyPhrasesTasks;
}
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
final List<EntityLinkingTask> entityLinkingTasks = new ArrayList<>();
for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
if (action == null) {
entityLinkingTasks.add(null);
} else {
entityLinkingTasks.add(
new EntityLinkingTask()
.setTaskName(action.getActionName())
.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return entityLinkingTasks;
}
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
final List<SentimentAnalysisTask> sentimentAnalysisTasks = new ArrayList<>();
for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
if (action == null) {
sentimentAnalysisTasks.add(null);
} else {
sentimentAnalysisTasks.add(
new SentimentAnalysisTask()
.setTaskName(action.getActionName())
.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return sentimentAnalysisTasks;
}
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
final List<ExtractiveSummarizationTask> extractiveSummarizationTasks = new ArrayList<>();
for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
if (action == null) {
extractiveSummarizationTasks.add(null);
} else {
extractiveSummarizationTasks.add(
new ExtractiveSummarizationTask()
.setTaskName(action.getActionName())
.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getOrderBy().toString()))));
}
}
return extractiveSummarizationTasks;
}
private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
}
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
final List<CustomSingleClassificationTask> customSingleClassificationTask = new ArrayList<>();
for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
if (action == null) {
customSingleClassificationTask.add(null);
} else {
customSingleClassificationTask.add(
new CustomSingleClassificationTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomSingleClassificationTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customSingleClassificationTask;
}
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
final List<CustomMultiClassificationTask> customMultiClassificationTask = new ArrayList<>();
for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
if (action == null) {
customMultiClassificationTask.add(null);
} else {
customMultiClassificationTask.add(
new CustomMultiClassificationTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomMultiClassificationTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customMultiClassificationTask;
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
Integer skip, boolean showStats, Context context) {
if (continuationToken != null) {
final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault(showStats, false);
return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} else {
return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
}
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
final AnalyzeJobState analyzeJobState = response.getValue();
return new PagedResponseBase<Void, AnalyzeActionsResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
Arrays.asList(toAnalyzeActionsResult(analyzeJobState)),
analyzeJobState.getNextLink(),
null);
}
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
Response<AnalyzeJobState> analyzeJobStateResponse,
PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
switch (analyzeJobStateResponse.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case CANCELLED:
status = LongRunningOperationStatus.USER_CANCELLED;
break;
default:
status = LongRunningOperationStatus.fromString(
analyzeJobStateResponse.getValue().getStatus().toString(), true);
break;
}
}
AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getDisplayName());
AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getCreatedDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getExpirationDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getLastUpdateDateTime());
final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
tasksResult.getFailed());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
tasksResult.getInProgress());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
operationResultPollResponse.getValue(), tasksResult.getCompleted());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
tasksResult.getTotal());
return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
private String[] parseActionErrorTarget(String targetReference) {
if (CoreUtils.isNullOrEmpty(targetReference)) {
throw logger.logExceptionAsError(new RuntimeException(
"Expected an error with a target field referencing an action but did not get one"));
}
final Matcher matcher = PATTERN.matcher(targetReference);
String[] taskNameIdPair = new String[2];
while (matcher.find()) {
taskNameIdPair[0] = matcher.group(1);
taskNameIdPair[1] = matcher.group(2);
}
return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper
.setOperationId(textAnalyticsOperationResult,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return textAnalyticsOperationResult;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext)))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
Context context) {
try {
inputDocumentsValidation(documents);
options = getNotNullAnalyzeActionsOptions(options);
final Context finalContext = getNotNullContext(context)
.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
final AnalyzeBatchInput analyzeBatchInput =
new AnalyzeBatchInput()
.setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
.setTasks(getJobManifestTasks(actions));
analyzeBatchInput.setDisplayName(actions.getDisplayName());
final boolean finalIncludeStatistics = options.isIncludeStatistics();
return new PollerFlux<>(
DEFAULT_POLL_INTERVAL,
activationOperation(
service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
.map(analyzeResponse -> {
final AnalyzeActionsOperationDetail operationDetail =
new AnalyzeActionsOperationDetail();
AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
return operationDetail;
})),
pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
finalIncludeStatistics, null, null, finalContext)),
(activationResponse, pollingContext) ->
Mono.error(new RuntimeException("Cancellation is not supported.")),
fetchingOperationIterable(
operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
operationId, null, null, finalIncludeStatistics, finalContext))))
);
} catch (RuntimeException ex) {
return PollerFlux.error(ex);
}
}
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
if (actions == null) {
return null;
}
final JobManifestTasks jobManifestTasks = new JobManifestTasks();
if (actions.getRecognizeEntitiesActions() != null) {
jobManifestTasks.setEntityRecognitionTasks(toEntitiesTask(actions));
}
if (actions.getRecognizePiiEntitiesActions() != null) {
jobManifestTasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
}
if (actions.getExtractKeyPhrasesActions() != null) {
jobManifestTasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
}
if (actions.getRecognizeLinkedEntitiesActions() != null) {
jobManifestTasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
}
if (actions.getAnalyzeSentimentActions() != null) {
jobManifestTasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
}
if (actions.getExtractSummaryActions() != null) {
jobManifestTasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
}
if (actions.getRecognizeCustomEntitiesActions() != null) {
jobManifestTasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
}
if (actions.getSingleCategoryClassifyActions() != null) {
jobManifestTasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
}
if (actions.getMultiCategoryClassifyActions() != null) {
jobManifestTasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
}
return jobManifestTasks;
}
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
final List<EntitiesTask> entitiesTasks = new ArrayList<>();
for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
if (action == null) {
entitiesTasks.add(null);
} else {
entitiesTasks.add(
new EntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new EntitiesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return entitiesTasks;
}
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
final List<PiiTask> piiTasks = new ArrayList<>();
for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
if (action == null) {
piiTasks.add(null);
} else {
piiTasks.add(
new PiiTask()
.setTaskName(action.getActionName())
.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
}
}
return piiTasks;
}
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
final List<KeyPhrasesTask> keyPhrasesTasks = new ArrayList<>();
for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
if (action == null) {
keyPhrasesTasks.add(null);
} else {
keyPhrasesTasks.add(
new KeyPhrasesTask()
.setTaskName(action.getActionName())
.setParameters(
new KeyPhrasesTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return keyPhrasesTasks;
}
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
final List<EntityLinkingTask> entityLinkingTasks = new ArrayList<>();
for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
if (action == null) {
entityLinkingTasks.add(null);
} else {
entityLinkingTasks.add(
new EntityLinkingTask()
.setTaskName(action.getActionName())
.setParameters(
new EntityLinkingTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return entityLinkingTasks;
}
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
final List<SentimentAnalysisTask> sentimentAnalysisTasks = new ArrayList<>();
for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
if (action == null) {
sentimentAnalysisTasks.add(null);
} else {
sentimentAnalysisTasks.add(
new SentimentAnalysisTask()
.setTaskName(action.getActionName())
.setParameters(
new SentimentAnalysisTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
}
}
return sentimentAnalysisTasks;
}
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
final List<ExtractiveSummarizationTask> extractiveSummarizationTasks = new ArrayList<>();
for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
if (action == null) {
extractiveSummarizationTasks.add(null);
} else {
extractiveSummarizationTasks.add(
new ExtractiveSummarizationTask()
.setTaskName(action.getActionName())
.setParameters(
new ExtractiveSummarizationTaskParameters()
.setModelVersion(action.getModelVersion())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())
.setSentenceCount(action.getMaxSentenceCount())
.setSortBy(action.getOrderBy() == null ? null
: ExtractiveSummarizationTaskParametersSortBy.fromString(
action.getOrderBy().toString()))));
}
}
return extractiveSummarizationTasks;
}
private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
}
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
final List<CustomSingleClassificationTask> customSingleClassificationTask = new ArrayList<>();
for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
if (action == null) {
customSingleClassificationTask.add(null);
} else {
customSingleClassificationTask.add(
new CustomSingleClassificationTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomSingleClassificationTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customSingleClassificationTask;
}
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
final List<CustomMultiClassificationTask> customMultiClassificationTask = new ArrayList<>();
for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
if (action == null) {
customMultiClassificationTask.add(null);
} else {
customMultiClassificationTask.add(
new CustomMultiClassificationTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomMultiClassificationTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customMultiClassificationTask;
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
return pollingContext -> {
try {
return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
return pollingContext -> {
try {
final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
pollingContext.getLatestResponse();
final String operationId = operationResultPollResponse.getValue().getOperationId();
return pollingFunction.apply(operationId)
.flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
return pollingContext -> {
try {
final String operationId = pollingContext.getLatestResponse().getValue().getOperationId();
return fetchingFunction.apply(operationId);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
};
}
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
boolean showStats, Context context) {
return new AnalyzeActionsResultPagedFlux(
() -> (continuationToken, pageSize) ->
getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
Integer skip, boolean showStats, Context context) {
if (continuationToken != null) {
final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault(showStats, false);
return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
} else {
return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
.map(this::toAnalyzeActionsResultPagedResponse)
.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
}
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
final AnalyzeJobState analyzeJobState = response.getValue();
return new PagedResponseBase<Void, AnalyzeActionsResult>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
Arrays.asList(toAnalyzeActionsResult(analyzeJobState)),
analyzeJobState.getNextLink(),
null);
}
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
Response<AnalyzeJobState> analyzeJobStateResponse,
PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
switch (analyzeJobStateResponse.getValue().getStatus()) {
case NOT_STARTED:
case RUNNING:
status = LongRunningOperationStatus.IN_PROGRESS;
break;
case SUCCEEDED:
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case CANCELLED:
status = LongRunningOperationStatus.USER_CANCELLED;
break;
default:
status = LongRunningOperationStatus.fromString(
analyzeJobStateResponse.getValue().getStatus().toString(), true);
break;
}
}
AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getDisplayName());
AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getCreatedDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getExpirationDateTime());
AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
analyzeJobStateResponse.getValue().getLastUpdateDateTime());
final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
tasksResult.getFailed());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
tasksResult.getInProgress());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
operationResultPollResponse.getValue(), tasksResult.getCompleted());
AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
tasksResult.getTotal());
return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
private Context getNotNullContext(Context context) {
return context == null ? Context.NONE : context;
}
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
return options == null ? new AnalyzeActionsOptions() : options;
}
/**
 * Parses an error's {@code target} reference into a {@code {taskName, taskIndex}} pair
 * using the class-level {@code PATTERN}.
 *
 * @param targetReference the raw target string from the service error.
 * @return a two-element array: element 0 is the task-type name, element 1 is the task index.
 * @throws RuntimeException if the target is absent or does not match the expected format.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    // If the pattern matches more than once, the last match wins (unchanged from the
    // original behavior).
    while (matcher.find()) {
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // BUGFIX: previously an unmatched target returned {null, null}, which surfaced later as an
    // opaque NullPointerException at Integer.valueOf(...) in the caller. Fail fast with a
    // message that names the offending target instead.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from the error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} |
I see, so this stops the SDK code from blowing up? Have we seen users pass `null` in the request before? | private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
} | customEntitiesTasks.add(null); | private List<CustomEntitiesTask> toCustomEntitiesTask(TextAnalyticsActions actions) {
final List<CustomEntitiesTask> customEntitiesTasks = new ArrayList<>();
for (RecognizeCustomEntitiesAction action : actions.getRecognizeCustomEntitiesActions()) {
if (action == null) {
customEntitiesTasks.add(null);
} else {
customEntitiesTasks.add(
new CustomEntitiesTask()
.setTaskName(action.getActionName())
.setParameters(
new CustomEntitiesTaskParameters()
.setProjectName(action.getProjectName())
.setDeploymentName(action.getDeploymentName())
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setLoggingOptOut(action.isServiceLogsDisabled())));
}
}
return customEntitiesTasks;
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
/**
 * Creates an {@code AnalyzeActionsAsyncClient} backed by the given auto-generated
 * service implementation.
 *
 * @param service the generated Text Analytics service client used for all REST calls.
 */
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
    this.service = service;
}
/**
 * Starts the long-running "analyze actions" operation and returns a {@link PollerFlux} that
 * tracks it. The activation step submits the batch job, the polling step queries job status,
 * and the final fetch step exposes the results as a paged flux. Cancellation is not supported
 * by this operation.
 *
 * @param documents the documents to analyze; validated up front by
 *     {@code inputDocumentsValidation}.
 * @param actions the set of actions to run over the documents.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context additional pipeline context; {@code null} is replaced with
 *     {@link Context#NONE}.
 * @return a {@link PollerFlux} emitting operation metadata and, on completion, the paged
 *     results; a failed {@link PollerFlux} if validation or request construction throws.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        // Tag the context so distributed traces attribute the calls to Cognitive Services.
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            // Polling: re-query job status by operation id until a terminal state is reached.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported for this service operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: expose the completed operation's results as a paged flux.
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Iterable-result twin of {@code beginAnalyzeActions}: identical activation and polling
 * behavior, but the final fetch wraps the paged flux in an
 * {@link AnalyzeActionsResultPagedIterable} for synchronous consumption. Cancellation is not
 * supported by this operation.
 *
 * @param documents the documents to analyze; validated up front by
 *     {@code inputDocumentsValidation}.
 * @param actions the set of actions to run over the documents.
 * @param options optional call options; {@code null} is replaced with defaults.
 * @param context additional pipeline context; {@code null} is replaced with
 *     {@link Context#NONE}.
 * @return a {@link PollerFlux} emitting operation metadata and, on completion, an iterable of
 *     results; a failed {@link PollerFlux} if validation or request construction throws.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        // Tag the context so distributed traces attribute the calls to Cognitive Services.
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            // Polling: re-query job status by operation id until a terminal state is reached.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported for this service operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: wrap the paged flux so callers can iterate results synchronously.
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Builds the wire-format {@link JobManifestTasks} manifest from the user-facing
 * {@link TextAnalyticsActions}, populating only the task lists whose corresponding action
 * collections were configured.
 *
 * @param actions the user-configured actions, possibly {@code null}.
 * @return the manifest to submit, or {@code null} when {@code actions} is {@code null}.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks tasks = new JobManifestTasks();
    // Only convert the action groups the user actually configured; untouched groups stay null
    // so they are omitted from the request payload.
    if (actions.getRecognizeEntitiesActions() != null) {
        tasks.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        tasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        tasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        tasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        tasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        tasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        tasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        tasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        tasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return tasks;
}
/**
 * Converts the configured {@link RecognizeEntitiesAction} instances to wire-format
 * {@link EntitiesTask} objects. Null actions are carried through as null entries so list
 * positions stay aligned with the caller's configuration.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Converts the configured {@link RecognizePiiEntitiesAction} instances to wire-format
 * {@link PiiTask} objects. Null actions are carried through as null entries so list positions
 * stay aligned with the caller's configuration.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> tasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        // The user-facing domain filter enum is translated via its string form; a missing
        // filter maps to a null domain on the wire.
        final PiiTaskParametersDomain domain = PiiTaskParametersDomain.fromString(
            action.getDomainFilter() == null ? null : action.getDomainFilter().toString());
        tasks.add(new PiiTask()
            .setTaskName(action.getActionName())
            .setParameters(new PiiTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setDomain(domain)
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
    }
    return tasks;
}
/**
 * Converts the configured {@link ExtractKeyPhrasesAction} instances to wire-format
 * {@link KeyPhrasesTask} objects. Null actions are carried through as null entries so list
 * positions stay aligned with the caller's configuration.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        tasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Converts the configured {@link RecognizeLinkedEntitiesAction} instances to wire-format
 * {@link EntityLinkingTask} objects. Null actions are carried through as null entries so list
 * positions stay aligned with the caller's configuration.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> tasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntityLinkingTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntityLinkingTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Converts the configured {@link AnalyzeSentimentAction} instances to wire-format
 * {@link SentimentAnalysisTask} objects. Null actions are carried through as null entries so
 * list positions stay aligned with the caller's configuration.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        tasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Converts the configured {@link ExtractSummaryAction} instances to wire-format
 * {@link ExtractiveSummarizationTask} objects. Null actions are carried through as null
 * entries so list positions stay aligned with the caller's configuration.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        // The user-facing sort order enum is translated via its string form; a missing
        // order maps to a null sortBy on the wire.
        final ExtractiveSummarizationTaskParametersSortBy sortBy = action.getOrderBy() == null
            ? null
            : ExtractiveSummarizationTaskParametersSortBy.fromString(action.getOrderBy().toString());
        tasks.add(new ExtractiveSummarizationTask()
            .setTaskName(action.getActionName())
            .setParameters(new ExtractiveSummarizationTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setSentenceCount(action.getMaxSentenceCount())
                .setSortBy(sortBy)));
    }
    return tasks;
}
/**
 * Converts the configured {@link SingleCategoryClassifyAction} instances to wire-format
 * {@link CustomSingleClassificationTask} objects. Null actions are carried through as null
 * entries so list positions stay aligned with the caller's configuration.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> tasks = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Converts the configured {@link MultiCategoryClassifyAction} instances to wire-format
 * {@link CustomMultiClassificationTask} objects. Null actions are carried through as null
 * entries so list positions stay aligned with the caller's configuration.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> tasks = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomMultiClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomMultiClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Adapts the pre-built activation {@link Mono} into the function shape required by
 * {@link PollerFlux}, mapping transport errors into HTTP response exceptions where possible.
 *
 * @param operationResult the deferred service call that starts the operation; note the Mono is
 *     cold, so the request is only sent when the poller subscribes.
 * @return the activation function for the poller.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return pollingContext -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Defensive: surface synchronous failures as a failed Mono rather than throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's status-check function: reads the operation id from the latest poll
 * response, invokes the supplied status call, and converts the service's job state into the
 * next {@link PollResponse}.
 *
 * @param pollingFunction maps an operation id to the service's job-status call.
 * @return the polling function for the poller.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
                pollingContext.getLatestResponse();
            final String operationId = operationResultPollResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                // Fold the raw job state into the poll response that the poller will emit next.
                .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Defensive: surface synchronous failures as a failed Mono rather than throwing.
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's final-result function: resolves the operation id recorded during
 * polling and delegates to the supplied fetcher to produce the result paged flux.
 *
 * @param fetchingFunction maps an operation id to the paged result flux.
 * @return the fetching function for the poller.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Iterable-result twin of {@code fetchingOperation}: resolves the operation id recorded during
 * polling and delegates to the supplied fetcher to produce the result iterable.
 *
 * @param fetchingFunction maps an operation id to the paged result iterable.
 * @return the fetching function for the poller.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Creates a paged flux over the completed operation's results; each page request is served by
 * {@code getPage} using the continuation token the service returned for the previous page.
 *
 * @param operationId the analyze-actions operation id.
 * @param top optional page-size limit passed to the service.
 * @param skip optional offset passed to the service.
 * @param showStats whether to include document statistics in the results.
 * @param context additional pipeline context.
 * @return the paged flux of {@link AnalyzeActionsResult}.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches one page of analyze-actions results. On the first page the caller's paging
 * parameters are used directly; on subsequent pages they are recovered from the service's
 * next-link continuation token.
 *
 * @param continuationToken the next-link from the previous page, or {@code null} for the
 *     first page.
 * @param operationId the analyze-actions operation id.
 * @param top optional page-size limit (first page only).
 * @param skip optional offset (first page only).
 * @param showStats whether to include document statistics.
 * @param context additional pipeline context.
 * @return a {@link Mono} emitting the page of results.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        // Recover the paging query parameters encoded in the next-link.
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUGFIX: previously the boolean variable 'showStats' itself was used as the map key
        // (autoboxed to Boolean), which can never match a String key, so the lookup always
        // fell through to 'false'. Use the "$showStats" parameter key, mirroring "$top" and
        // "$skip" above, and fall back to the caller's requested value when the link omits it.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("$showStats", showStats);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Wraps a single job-state response as a one-element page, carrying the service's next-link
 * forward as the continuation token.
 *
 * @param response the raw job-status response.
 * @return a paged response containing exactly one {@link AnalyzeActionsResult}.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> page = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        page,
        jobState.getNextLink(),
        null);
}
/**
 * Maps a service {@link AnalyzeJobState} to the user-facing {@link AnalyzeActionsResult}:
 * each task-type's results are converted into their own action-result list, and any
 * action-level errors the service reported are attached to the corresponding action result
 * (located via the error's target reference).
 *
 * @param analyzeJobState the job state returned by the service; its tasks container is
 *     expected to be present for a started job.
 * @return the assembled {@link AnalyzeActionsResult}.
 * @throws RuntimeException if an error's target references an unknown task name.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
        tasksStateTasks.getCustomEntityRecognitionTasks();
    final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
        tasksStateTasks.getCustomSingleClassificationTasks();
    final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
        tasksStateTasks.getCustomMultiClassificationTasks();

    // One result list per action type; list order matches the service's task order, which is
    // what the error-target indices below refer to.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
    List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults = new ArrayList<>();
    List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults = new ArrayList<>();

    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (TasksStateTasksEntityRecognitionTasksItem taskItem : entityRecognitionTasksItems) {
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (TasksStateTasksEntityRecognitionPiiTasksItem taskItem : piiTasksItems) {
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (TasksStateTasksKeyPhraseExtractionTasksItem taskItem : keyPhraseExtractionTasks) {
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (TasksStateTasksEntityLinkingTasksItem taskItem : linkedEntityRecognitionTasksItems) {
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (TasksStateTasksSentimentAnalysisTasksItem taskItem : sentimentAnalysisTasksItems) {
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (TasksStateTasksExtractiveSummarizationTasksItem taskItem : extractiveSummarizationTasksItems) {
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
        for (TasksStateTasksCustomEntityRecognitionTasksItem taskItem : customEntityRecognitionTasksItems) {
            final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
            final CustomEntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeCustomEntitiesResultCollection(results));
            }
            // CONSISTENCY FIX: the action name was not propagated for custom actions, unlike
            // every other action type above. NOTE(review): assumes the generated item type
            // exposes getTaskName() like its siblings — confirm against the generated model.
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeCustomEntitiesActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
        for (TasksStateTasksCustomSingleClassificationTasksItem taskItem : customSingleClassificationTasksItems) {
            final SingleCategoryClassifyActionResult actionResult =
                new SingleCategoryClassifyActionResult();
            final CustomSingleClassificationResult results = taskItem.getResults();
            if (results != null) {
                SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toSingleCategoryClassifyResultCollection(results));
            }
            // CONSISTENCY FIX: propagate the action name (see note above in this method).
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            singleCategoryClassifyActionResults.add(actionResult);
        }
    }
    if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
        for (TasksStateTasksCustomMultiClassificationTasksItem taskItem : customMultiClassificationTasksItems) {
            final MultiCategoryClassifyActionResult actionResult =
                new MultiCategoryClassifyActionResult();
            final CustomMultiClassificationResult results = taskItem.getResults();
            if (results != null) {
                MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toMultiCategoryClassifyResultCollection(results));
            }
            // CONSISTENCY FIX: propagate the action name (see note above in this method).
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            multiCategoryClassifyActionResults.add(actionResult);
        }
    }

    // Attach each action-level error to the action result it targets. The target reference
    // encodes the task name and its index within that task list.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
            } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = singleCategoryClassifyActionResults.get(taskIndex);
            } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = multiCategoryClassifyActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }

    // Assemble the final result from the per-action lists.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeCustomEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
        IterableStream.of(singleCategoryClassifyActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoriesResults(analyzeActionsResult,
        IterableStream.of(multiCategoryClassifyActionResults));
    return analyzeActionsResult;
}
/**
 * Converts a service job-status response into the next {@link PollResponse} for the
 * long-running analyze-actions operation, copying job metadata and task counters onto the
 * in-flight {@link AnalyzeActionsOperationDetail}.
 *
 * @param analyzeJobStateResponse the latest job-status response from the service.
 * @param operationResultPollResponse the poll response whose value is updated in place.
 * @return a {@link Mono} emitting the updated poll response with the mapped status.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    // BUGFIX: the original null-checked getValue() only around the status switch, then
    // dereferenced it unconditionally for every setter below, which would throw
    // NullPointerException on an empty response body. Hoist the value once and guard all uses.
    final AnalyzeJobState jobState = analyzeJobStateResponse.getValue();
    if (jobState != null && jobState.getStatus() != null) {
        switch (jobState.getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Surface unknown service states verbatim as non-terminal statuses.
                status = LongRunningOperationStatus.fromString(
                    jobState.getStatus().toString(), true);
                break;
        }
    }
    if (jobState != null) {
        final AnalyzeActionsOperationDetail operationDetail = operationResultPollResponse.getValue();
        AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationDetail,
            jobState.getDisplayName());
        AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationDetail,
            jobState.getCreatedDateTime());
        AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationDetail,
            jobState.getExpirationDateTime());
        AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationDetail,
            jobState.getLastUpdateDateTime());
        // Guard getTasks() too; it was previously dereferenced without a null check.
        final TasksStateTasks tasksResult = jobState.getTasks();
        if (tasksResult != null) {
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationDetail,
                tasksResult.getFailed());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationDetail,
                tasksResult.getInProgress());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(operationDetail,
                tasksResult.getCompleted());
            AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationDetail,
                tasksResult.getTotal());
        }
    }
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/**
 * Returns the given {@link Context}, substituting {@link Context#NONE} when the caller
 * passed {@code null}.
 *
 * @param context the caller-supplied context, possibly {@code null}.
 * @return a non-null context.
 */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/**
 * Returns the caller-supplied options, substituting a fresh default instance when none was given.
 */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options == null) {
        return new AnalyzeActionsOptions();
    }
    return options;
}
/**
 * Parses a service error's target reference into a task-name/task-index pair via {@code PATTERN}.
 *
 * @param targetReference the {@code target} field of a service error; must reference an action.
 * @return a two-element array: [0] the task name, [1] the task index (as a string).
 * @throws RuntimeException if the reference is absent or does not match the expected pattern.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    final String[] taskNameIdPair = new String[2];
    while (matcher.find()) {
        // Last match wins — preserves the original behavior for multi-match input.
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // Previously an unmatched reference returned {null, null}, which made the caller fail later
    // with an opaque NullPointerException in Integer.valueOf; fail fast with a clear message.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String CUSTOM_ENTITY_RECOGNITION_TASKS = "customEntityRecognitionTasks";
private static final String CUSTOM_SINGLE_CLASSIFICATION_TASKS = "customClassificationTasks";
private static final String CUSTOM_MULTI_CLASSIFICATION_TASKS = "customMultiClassificationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_PII_TASKS, ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS,
EXTRACTIVE_SUMMARIZATION_TASKS, CUSTOM_ENTITY_RECOGNITION_TASKS, CUSTOM_SINGLE_CLASSIFICATION_TASKS,
CUSTOM_MULTI_CLASSIFICATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
/**
 * Submits the documents and the configured actions as one long-running "analyze" job and returns
 * a {@link PollerFlux} whose final result is a paged flux of {@link AnalyzeActionsResult}.
 *
 * @param documents the batch of documents to analyze; validated before submission.
 * @param options optional request options; a default instance is used when null.
 * @param context pipeline context; {@link Context#NONE} is used when null.
 * @return a poller over the analyze operation, or an error poller if validation/assembly throws.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Assemble the request body: documents plus the per-action task manifests.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            // Poll: re-query the job status endpoint until a terminal state.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // The service does not support cancelling an analyze job.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: expose the completed operation's results as a paged flux.
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Same flow as {@code beginAnalyzeActions}, but the final result is the synchronous
 * {@link AnalyzeActionsResultPagedIterable} wrapper instead of a paged flux.
 *
 * @param documents the batch of documents to analyze; validated before submission.
 * @param options optional request options; a default instance is used when null.
 * @param context pipeline context; {@link Context#NONE} is used when null.
 * @return a poller over the analyze operation, or an error poller if validation/assembly throws.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and record its operation id.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // The service does not support cancelling an analyze job.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetch: wrap the paged flux in the iterable facade for synchronous consumers.
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Translates the user-facing {@link TextAnalyticsActions} into the service's
 * {@link JobManifestTasks}, populating only the task lists for action types that were configured.
 *
 * @param actions the configured actions; may be null.
 * @return the job manifest, or null when {@code actions} is null.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks jobManifestTasks = new JobManifestTasks();
    // Each action collection is mapped independently; unset collections stay absent in the manifest.
    if (actions.getRecognizeEntitiesActions() != null) {
        jobManifestTasks.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        jobManifestTasks.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        jobManifestTasks.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        jobManifestTasks.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        jobManifestTasks.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        jobManifestTasks.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    if (actions.getRecognizeCustomEntitiesActions() != null) {
        jobManifestTasks.setCustomEntityRecognitionTasks(toCustomEntitiesTask(actions));
    }
    if (actions.getSingleCategoryClassifyActions() != null) {
        jobManifestTasks.setCustomSingleClassificationTasks(toCustomSingleClassificationTask(actions));
    }
    if (actions.getMultiCategoryClassifyActions() != null) {
        jobManifestTasks.setCustomMultiClassificationTasks(toCustomMultiClassificationTask(actions));
    }
    return jobManifestTasks;
}
/**
 * Maps the configured entity-recognition actions onto service-layer {@link EntitiesTask}s,
 * preserving order and keeping a {@code null} placeholder for each null action.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    for (RecognizeEntitiesAction action : actions.getRecognizeEntitiesActions()) {
        tasks.add(action == null
            ? null
            : new EntitiesTask()
                .setTaskName(action.getActionName())
                .setParameters(new EntitiesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps the configured PII-recognition actions onto service-layer {@link PiiTask}s, preserving
 * order and keeping a {@code null} placeholder for each null action.
 */
private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
    final List<PiiTask> tasks = new ArrayList<>();
    for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        final PiiTaskParameters parameters = new PiiTaskParameters()
            .setModelVersion(action.getModelVersion())
            .setLoggingOptOut(action.isServiceLogsDisabled())
            // A null domain filter is forwarded as-is.
            .setDomain(PiiTaskParametersDomain.fromString(
                action.getDomainFilter() == null ? null : action.getDomainFilter().toString()))
            .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
            .setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()));
        tasks.add(new PiiTask().setTaskName(action.getActionName()).setParameters(parameters));
    }
    return tasks;
}
/**
 * Maps the configured key-phrase actions onto service-layer {@link KeyPhrasesTask}s, preserving
 * order and keeping a {@code null} placeholder for each null action.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    for (ExtractKeyPhrasesAction action : actions.getExtractKeyPhrasesActions()) {
        tasks.add(action == null
            ? null
            : new KeyPhrasesTask()
                .setTaskName(action.getActionName())
                .setParameters(new KeyPhrasesTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps the configured linked-entity actions onto service-layer {@link EntityLinkingTask}s,
 * preserving order and keeping a {@code null} placeholder for each null action.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> tasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        final EntityLinkingTaskParameters parameters = new EntityLinkingTaskParameters()
            .setModelVersion(action.getModelVersion())
            .setLoggingOptOut(action.isServiceLogsDisabled())
            .setStringIndexType(StringIndexType.UTF16CODE_UNIT);
        tasks.add(new EntityLinkingTask().setTaskName(action.getActionName()).setParameters(parameters));
    }
    return tasks;
}
/**
 * Maps the configured sentiment actions onto service-layer {@link SentimentAnalysisTask}s,
 * preserving order and keeping a {@code null} placeholder for each null action.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    for (AnalyzeSentimentAction action : actions.getAnalyzeSentimentActions()) {
        tasks.add(action == null
            ? null
            : new SentimentAnalysisTask()
                .setTaskName(action.getActionName())
                .setParameters(new SentimentAnalysisTaskParameters()
                    .setModelVersion(action.getModelVersion())
                    .setLoggingOptOut(action.isServiceLogsDisabled())
                    .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
    }
    return tasks;
}
/**
 * Maps the configured extractive-summarization actions onto service-layer
 * {@link ExtractiveSummarizationTask}s, preserving order and keeping a {@code null} placeholder
 * for each null action.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    for (ExtractSummaryAction action : actions.getExtractSummaryActions()) {
        if (action == null) {
            tasks.add(null);
            continue;
        }
        // A null order-by option is forwarded as a null sort-by.
        final ExtractiveSummarizationTaskParametersSortBy sortBy = action.getOrderBy() == null
            ? null
            : ExtractiveSummarizationTaskParametersSortBy.fromString(action.getOrderBy().toString());
        tasks.add(new ExtractiveSummarizationTask()
            .setTaskName(action.getActionName())
            .setParameters(new ExtractiveSummarizationTaskParameters()
                .setModelVersion(action.getModelVersion())
                .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
                .setLoggingOptOut(action.isServiceLogsDisabled())
                .setSentenceCount(action.getMaxSentenceCount())
                .setSortBy(sortBy)));
    }
    return tasks;
}
/**
 * Maps the configured single-category classification actions onto service-layer
 * {@link CustomSingleClassificationTask}s, preserving order and keeping a {@code null}
 * placeholder for each null action.
 */
private List<CustomSingleClassificationTask> toCustomSingleClassificationTask(TextAnalyticsActions actions) {
    final List<CustomSingleClassificationTask> tasks = new ArrayList<>();
    for (SingleCategoryClassifyAction action : actions.getSingleCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomSingleClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomSingleClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Maps the configured multi-category classification actions onto service-layer
 * {@link CustomMultiClassificationTask}s, preserving order and keeping a {@code null}
 * placeholder for each null action.
 */
private List<CustomMultiClassificationTask> toCustomMultiClassificationTask(TextAnalyticsActions actions) {
    final List<CustomMultiClassificationTask> tasks = new ArrayList<>();
    for (MultiCategoryClassifyAction action : actions.getMultiCategoryClassifyActions()) {
        tasks.add(action == null
            ? null
            : new CustomMultiClassificationTask()
                .setTaskName(action.getActionName())
                .setParameters(new CustomMultiClassificationTaskParameters()
                    .setProjectName(action.getProjectName())
                    .setDeploymentName(action.getDeploymentName())
                    .setLoggingOptOut(action.isServiceLogsDisabled())));
    }
    return tasks;
}
/**
 * Wraps the already-assembled job-submission {@link Mono} as a poller activation function.
 * Service errors are mapped via {@code Utility::mapToHttpResponseExceptionIfExists};
 * synchronous failures are surfaced as a logged error Mono.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return pollingContext -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poll function: reads the operation id recorded during activation from the latest
 * poll response, queries the job status, and converts it into the next poll response.
 * NOTE(review): assumes {@code pollingContext.getLatestResponse().getValue()} is non-null,
 * i.e. activation always produced an operation detail — confirm.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
                pollingContext.getLatestResponse();
            final String operationId = operationResultPollResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the final-result function: resolves the operation id from the latest poll response and
 * delegates to the supplied fetcher; synchronous failures become a logged error Mono.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Iterable-flavored variant of {@code fetchingOperation}: resolves the operation id from the
 * latest poll response and delegates to the supplied fetcher.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds a paged flux over the analyze operation's results; each page request re-queries the
 * job status endpoint through {@link #getPage}.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Retrieves one page of action results for the given operation. When a continuation token is
 * present, the paging parameters are parsed out of it; otherwise the caller-supplied values are
 * used directly.
 *
 * @param continuationToken the nextLink from the previous page, or null for the first page.
 * @param operationId the analyze operation id.
 * @return a Mono emitting the page, with service errors mapped to HttpResponseException.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: the map is keyed by String, but the previous code passed the boolean
        // 'showStats' itself as the key — it never matched, silently forcing statistics off on
        // every continuation page. Look up the named parameter and fall back to the caller's
        // value when the token does not carry it.
        // NOTE(review): assumes the nextLink query parameter is named "showStats" — confirm
        // against parseNextLink and the service's nextLink format.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", showStats);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Converts a raw job-state response into a single-item page of {@link AnalyzeActionsResult},
 * carrying the service's nextLink forward as the continuation token.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(response.getRequest(),
        response.getStatusCode(), response.getHeaders(), pageItems, jobState.getNextLink(), null);
}
/**
 * Converts one service {@link AnalyzeJobState} snapshot into the user-facing
 * {@link AnalyzeActionsResult}: each per-action task list is mapped to its action-result list
 * (order preserved), then job-level errors are attached to the action they target.
 * NOTE(review): {@code analyzeJobState.getTasks()} is dereferenced unconditionally — confirm the
 * service always returns a tasks object here.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    final List<TasksStateTasksCustomEntityRecognitionTasksItem> customEntityRecognitionTasksItems =
        tasksStateTasks.getCustomEntityRecognitionTasks();
    final List<TasksStateTasksCustomSingleClassificationTasksItem> customSingleClassificationTasksItems =
        tasksStateTasks.getCustomSingleClassificationTasks();
    final List<TasksStateTasksCustomMultiClassificationTasksItem> customMultiClassificationTasksItems =
        tasksStateTasks.getCustomMultiClassificationTasks();
    // One result list per action type; list position i corresponds to action index i, which is
    // how the error-target mapping below locates an action.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    List<RecognizeCustomEntitiesActionResult> recognizeCustomEntitiesActionResults = new ArrayList<>();
    List<SingleCategoryClassifyActionResult> singleCategoryClassifyActionResults =
        new ArrayList<>();
    List<MultiCategoryClassifyActionResult> multiCategoryClassifyActionResults =
        new ArrayList<>();
    // Entity recognition (NER) results.
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            // Results may be absent for tasks that have not produced output (e.g. failed tasks).
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    // PII entity recognition results.
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (int i = 0; i < piiTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    // Key-phrase extraction results.
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
            final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    // Linked-entity recognition results.
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    // Sentiment analysis results.
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
            final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    // Extractive summarization results.
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
            final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                extractiveSummarizationTasksItems.get(i);
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    // Custom entity recognition results.
    if (!CoreUtils.isNullOrEmpty(customEntityRecognitionTasksItems)) {
        for (int i = 0; i < customEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksCustomEntityRecognitionTasksItem taskItem =
                customEntityRecognitionTasksItems.get(i);
            final RecognizeCustomEntitiesActionResult actionResult = new RecognizeCustomEntitiesActionResult();
            final CustomEntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeCustomEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeCustomEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeCustomEntitiesActionResults.add(actionResult);
        }
    }
    // Custom single-category classification results.
    if (!CoreUtils.isNullOrEmpty(customSingleClassificationTasksItems)) {
        for (int i = 0; i < customSingleClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomSingleClassificationTasksItem taskItem =
                customSingleClassificationTasksItems.get(i);
            final SingleCategoryClassifyActionResult actionResult =
                new SingleCategoryClassifyActionResult();
            final CustomSingleClassificationResult results = taskItem.getResults();
            if (results != null) {
                SingleCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toSingleCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            singleCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Custom multi-category classification results.
    if (!CoreUtils.isNullOrEmpty(customMultiClassificationTasksItems)) {
        for (int i = 0; i < customMultiClassificationTasksItems.size(); i++) {
            final TasksStateTasksCustomMultiClassificationTasksItem taskItem =
                customMultiClassificationTasksItems.get(i);
            final MultiCategoryClassifyActionResult actionResult =
                new MultiCategoryClassifyActionResult();
            final CustomMultiClassificationResult results = taskItem.getResults();
            if (results != null) {
                MultiCategoryClassifyActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toMultiCategoryClassifyResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            multiCategoryClassifyActionResults.add(actionResult);
        }
    }
    // Attach each job-level error to the action its target reference points at
    // (task name selects the list, task index selects the element).
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else if (CUSTOM_ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeCustomEntitiesActionResults.get(taskIndex);
            } else if (CUSTOM_SINGLE_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = singleCategoryClassifyActionResults.get(taskIndex);
            } else if (CUSTOM_MULTI_CLASSIFICATION_TASKS.equals(taskName)) {
                actionResult = multiCategoryClassifyActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Assemble the aggregate result from the per-action lists.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeCustomEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeCustomEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifySingleCategoryResults(analyzeActionsResult,
        IterableStream.of(singleCategoryClassifyActionResults));
    AnalyzeActionsResultPropertiesHelper.setClassifyMultiCategoryResults(analyzeActionsResult,
        IterableStream.of(multiCategoryClassifyActionResults));
    return analyzeActionsResult;
}
/**
 * Converts a job-status response into the next {@link PollResponse}: maps the service job status
 * to a {@link LongRunningOperationStatus} and copies job metadata (display name, timestamps,
 * task counts) onto the in-flight operation detail.
 * NOTE(review): the status switch guards {@code getValue() != null}, but the property setters
 * below dereference {@code getValue()} (and {@code getTasks()}) unconditionally — an empty
 * response body would throw NullPointerException here. Confirm whether that can occur.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
        switch (analyzeJobStateResponse.getValue().getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Unknown states are surfaced verbatim as non-terminal custom statuses.
                status = LongRunningOperationStatus.fromString(
                    analyzeJobStateResponse.getValue().getStatus().toString(), true);
                break;
        }
    }
    // Copy job metadata onto the operation detail carried between polls.
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getLastUpdateDateTime());
    final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
        tasksResult.getFailed());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
        tasksResult.getInProgress());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
        operationResultPollResponse.getValue(), tasksResult.getCompleted());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
        tasksResult.getTotal());
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/**
 * Returns the caller-supplied context, substituting {@link Context#NONE} when none was given.
 */
private Context getNotNullContext(Context context) {
    if (context == null) {
        return Context.NONE;
    }
    return context;
}
/**
 * Returns the caller-supplied options, substituting a fresh default instance when none was given.
 */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options == null) {
        return new AnalyzeActionsOptions();
    }
    return options;
}
/**
 * Parses a service error's target reference into a task-name/task-index pair via {@code PATTERN}.
 *
 * @param targetReference the {@code target} field of a service error; must reference an action.
 * @return a two-element array: [0] the task name, [1] the task index (as a string).
 * @throws RuntimeException if the reference is absent or does not match the expected pattern.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    final String[] taskNameIdPair = new String[2];
    while (matcher.find()) {
        // Last match wins — preserves the original behavior for multi-match input.
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // Previously an unmatched reference returned {null, null}, which made the caller fail later
    // with an opaque NullPointerException in Integer.valueOf; fail fast with a clear message.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} |
Do we want to have enum/constants to represent kinds value? | private static boolean isWebApp(SiteInner inner) {
boolean ret = false;
if (inner.kind() == null) {
ret = true;
} else {
List<String> kinds = Arrays.asList(inner.kind().split(Pattern.quote(",")));
if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) {
ret = true;
}
}
return ret;
} | if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) { | private static boolean isWebApp(SiteInner inner) {
boolean ret = false;
if (inner.kind() == null) {
ret = true;
} else {
List<String> kinds = Arrays.asList(inner.kind().split(Pattern.quote(",")));
if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) {
ret = true;
}
}
return ret;
} | class WebAppsImpl
extends GroupableResourcesImpl<WebApp, WebAppImpl, SiteInner, WebAppsClient, AppServiceManager>
implements WebApps, SupportsBatchDeletion {
/**
 * Creates a WebAppsImpl backed by the given manager's inner WebApps client.
 *
 * @param manager the AppServiceManager supplying the service client.
 */
public WebAppsImpl(final AppServiceManager manager) {
    super(manager.serviceClient().getWebApps(), manager);
}
/**
 * Gets a web app by resource group and name, also loading its site
 * configuration and diagnostic-logs configuration.
 *
 * @param resourceGroupName the resource group of the web app; must be non-empty.
 * @param name the name of the web app; must be non-empty.
 * @return a Mono emitting the fully-wrapped web app, or an
 *     IllegalArgumentException error for a missing argument.
 */
@Override
public Mono<WebApp> getByResourceGroupAsync(final String resourceGroupName, final String name) {
    // Surface argument failures through the reactive pipeline, not by throwing.
    if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
        return Mono.error(
            new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null."));
    }
    if (CoreUtils.isNullOrEmpty(name)) {
        return Mono.error(
            new IllegalArgumentException("Parameter 'name' is required and cannot be null."));
    }
    // Fetch the site first, then fetch its configuration and diagnostic-log
    // settings concurrently (Mono.zip) and combine all three into the model.
    return this
        .getInnerAsync(resourceGroupName, name)
        .flatMap(
            siteInner ->
                Mono
                    .zip(
                        this.inner().getConfigurationAsync(resourceGroupName, name),
                        this.inner().getDiagnosticLogsConfigurationAsync(resourceGroupName, name),
                        (SiteConfigResourceInner siteConfigResourceInner, SiteLogsConfigInner logsConfigInner) ->
                            wrapModel(siteInner, siteConfigResourceInner, logsConfigInner)));
}
/**
 * Fetches the raw {@code SiteInner} for a web app by resource group and name.
 *
 * @param resourceGroupName the resource group of the web app.
 * @param name the web app name.
 * @return a Mono emitting the inner site model.
 */
@Override
protected Mono<SiteInner> getInnerAsync(String resourceGroupName, String name) {
    return this.inner().getByResourceGroupAsync(resourceGroupName, name);
}
/**
 * Deletes the web app and discards the service response body.
 *
 * @param resourceGroupName the resource group of the web app.
 * @param name the web app name.
 * @return a Mono that completes when the deletion finishes.
 */
@Override
protected Mono<Void> deleteInnerAsync(String resourceGroupName, String name) {
    // then() drops the response payload, keeping only the completion signal.
    return inner().deleteAsync(resourceGroupName, name).then();
}
/**
 * Starts a new in-memory model for a web app definition.
 *
 * @param name the name of the new web app.
 * @return a WebAppImpl whose inner site defaults to kind "app".
 */
@Override
protected WebAppImpl wrapModel(String name) {
    return new WebAppImpl(name, new SiteInner().withKind("app"), null, null, this.manager());
}
/**
 * Wraps an inner site model (plus optional configuration and log settings)
 * into a WebAppImpl.
 *
 * @param inner the inner site model; may be null.
 * @param siteConfig the site configuration; may be null.
 * @param logConfig the diagnostic-logs configuration; may be null.
 * @return the wrapped model, or {@code null} when {@code inner} is null so
 *     callers can propagate "not found".
 */
protected WebAppImpl wrapModel(SiteInner inner, SiteConfigResourceInner siteConfig, SiteLogsConfigInner logConfig) {
    return inner == null
        ? null
        : new WebAppImpl(inner.name(), inner, siteConfig, logConfig, this.manager());
}
/**
 * Wraps an inner site model without configuration or log settings.
 *
 * @param inner the inner site model; may be null.
 * @return the wrapped model, or null when {@code inner} is null.
 */
@Override
protected WebAppImpl wrapModel(SiteInner inner) {
    return wrapModel(inner, null, null);
}
/**
 * Begins a new web app definition.
 *
 * @param name the name of the new web app.
 * @return the first stage of the web app definition.
 */
@Override
public WebAppImpl define(String name) {
    return wrapModel(name);
}
/**
 * Deletes multiple web apps by their resource ids.
 *
 * @param ids the resource ids of the web apps to delete.
 * @return a Flux emitting each id as its deletion completes.
 */
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
    return BatchDeletionImpl.deleteByIdsAsync(ids, this::deleteInnerAsync);
}
/**
 * Deletes multiple web apps by their resource ids.
 *
 * @param ids the resource ids of the web apps to delete.
 * @return a Flux emitting each id as its deletion completes.
 */
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
    // Copy the varargs into a mutable list and delegate to the Collection overload.
    List<String> idList = new ArrayList<>(Arrays.asList(ids));
    return this.deleteByIdsAsync(idList);
}
/**
 * Deletes multiple web apps by their resource ids, blocking until all
 * deletions have completed. A null or empty collection is a no-op.
 *
 * @param ids the resource ids of the web apps to delete.
 */
@Override
public void deleteByIds(Collection<String> ids) {
    // Nothing to do for a null or empty id collection.
    if (ids == null || ids.isEmpty()) {
        return;
    }
    this.deleteByIdsAsync(ids).blockLast();
}
/**
 * Deletes multiple web apps by their resource ids, blocking until done.
 *
 * @param ids the resource ids of the web apps to delete.
 */
@Override
public void deleteByIds(String... ids) {
    this.deleteByIds(new ArrayList<>(Arrays.asList(ids)));
}
/**
 * Lists web apps in a resource group as lightweight models, blocking.
 *
 * @param resourceGroupName the resource group to list.
 * @return a paged iterable of web apps.
 */
@Override
public PagedIterable<WebAppBasic> listByResourceGroup(String resourceGroupName) {
    return new PagedIterable<>(this.listByResourceGroupAsync(resourceGroupName));
}
/**
 * Lists web apps in a resource group as lightweight {@code WebAppBasic}
 * models. Sites whose kind is not a web app (see {@code isWebApp}) are
 * filtered out of the results.
 *
 * @param resourceGroupName the resource group to list; must be non-empty.
 * @return a paged flux of web apps, or an error-producing flux for a
 *     missing argument.
 */
@Override
public PagedFlux<WebAppBasic> listByResourceGroupAsync(String resourceGroupName) {
    if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
        // Defer the error into the paged flux rather than throwing synchronously.
        return new PagedFlux<>(() -> Mono.error(
            new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null.")));
    }
    return PagedConverter.flatMapPage(inner().listByResourceGroupAsync(resourceGroupName),
        inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
/**
 * Lists all web apps in the subscription, blocking.
 *
 * @return a paged iterable of web apps.
 */
@Override
public PagedIterable<WebAppBasic> list() {
    return new PagedIterable<>(this.listAsync());
}
/**
 * Lists all web apps in the subscription, keeping only sites whose kind
 * matches a web app (see {@code isWebApp}).
 *
 * @return a paged flux of web apps.
 */
@Override
public PagedFlux<WebAppBasic> listAsync() {
    return PagedConverter.flatMapPage(inner().listAsync(),
        inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
} | class WebAppsImpl
extends GroupableResourcesImpl<WebApp, WebAppImpl, SiteInner, WebAppsClient, AppServiceManager>
implements WebApps, SupportsBatchDeletion {
public WebAppsImpl(final AppServiceManager manager) {
super(manager.serviceClient().getWebApps(), manager);
}
@Override
public Mono<WebApp> getByResourceGroupAsync(final String resourceGroupName, final String name) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null."));
}
if (CoreUtils.isNullOrEmpty(name)) {
return Mono.error(
new IllegalArgumentException("Parameter 'name' is required and cannot be null."));
}
return this
.getInnerAsync(resourceGroupName, name)
.flatMap(
siteInner ->
Mono
.zip(
this.inner().getConfigurationAsync(resourceGroupName, name),
this.inner().getDiagnosticLogsConfigurationAsync(resourceGroupName, name),
(SiteConfigResourceInner siteConfigResourceInner, SiteLogsConfigInner logsConfigInner) ->
wrapModel(siteInner, siteConfigResourceInner, logsConfigInner)));
}
@Override
protected Mono<SiteInner> getInnerAsync(String resourceGroupName, String name) {
return this.inner().getByResourceGroupAsync(resourceGroupName, name);
}
@Override
protected Mono<Void> deleteInnerAsync(String resourceGroupName, String name) {
return inner().deleteAsync(resourceGroupName, name).then();
}
@Override
protected WebAppImpl wrapModel(String name) {
return new WebAppImpl(name, new SiteInner().withKind("app"), null, null, this.manager());
}
protected WebAppImpl wrapModel(SiteInner inner, SiteConfigResourceInner siteConfig, SiteLogsConfigInner logConfig) {
if (inner == null) {
return null;
}
return new WebAppImpl(inner.name(), inner, siteConfig, logConfig, this.manager());
}
@Override
protected WebAppImpl wrapModel(SiteInner inner) {
return wrapModel(inner, null, null);
}
@Override
public WebAppImpl define(String name) {
return wrapModel(name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
return BatchDeletionImpl.deleteByIdsAsync(ids, this::deleteInnerAsync);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
if (ids != null && !ids.isEmpty()) {
this.deleteByIdsAsync(ids).blockLast();
}
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIds(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public PagedIterable<WebAppBasic> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(this.listByResourceGroupAsync(resourceGroupName));
}
@Override
public PagedFlux<WebAppBasic> listByResourceGroupAsync(String resourceGroupName) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null.")));
}
return PagedConverter.flatMapPage(inner().listByResourceGroupAsync(resourceGroupName),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
@Override
public PagedIterable<WebAppBasic> list() {
return new PagedIterable<>(this.listAsync());
}
@Override
public PagedFlux<WebAppBasic> listAsync() {
return PagedConverter.flatMapPage(inner().listAsync(),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
} |
Currently they do not have a shared parent in which to put the constant, and it seems not worthwhile to create a new class for the constant, as currently there is only one "kubernetes" literal duplicated in two places. | private static boolean isWebApp(SiteInner inner) {
boolean ret = false;
if (inner.kind() == null) {
ret = true;
} else {
List<String> kinds = Arrays.asList(inner.kind().split(Pattern.quote(",")));
if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) {
ret = true;
}
}
return ret;
} | if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) { | private static boolean isWebApp(SiteInner inner) {
boolean ret = false;
if (inner.kind() == null) {
ret = true;
} else {
List<String> kinds = Arrays.asList(inner.kind().split(Pattern.quote(",")));
if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) {
ret = true;
}
}
return ret;
} | class WebAppsImpl
extends GroupableResourcesImpl<WebApp, WebAppImpl, SiteInner, WebAppsClient, AppServiceManager>
implements WebApps, SupportsBatchDeletion {
public WebAppsImpl(final AppServiceManager manager) {
super(manager.serviceClient().getWebApps(), manager);
}
@Override
public Mono<WebApp> getByResourceGroupAsync(final String resourceGroupName, final String name) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null."));
}
if (CoreUtils.isNullOrEmpty(name)) {
return Mono.error(
new IllegalArgumentException("Parameter 'name' is required and cannot be null."));
}
return this
.getInnerAsync(resourceGroupName, name)
.flatMap(
siteInner ->
Mono
.zip(
this.inner().getConfigurationAsync(resourceGroupName, name),
this.inner().getDiagnosticLogsConfigurationAsync(resourceGroupName, name),
(SiteConfigResourceInner siteConfigResourceInner, SiteLogsConfigInner logsConfigInner) ->
wrapModel(siteInner, siteConfigResourceInner, logsConfigInner)));
}
@Override
protected Mono<SiteInner> getInnerAsync(String resourceGroupName, String name) {
return this.inner().getByResourceGroupAsync(resourceGroupName, name);
}
@Override
protected Mono<Void> deleteInnerAsync(String resourceGroupName, String name) {
return inner().deleteAsync(resourceGroupName, name).then();
}
@Override
protected WebAppImpl wrapModel(String name) {
return new WebAppImpl(name, new SiteInner().withKind("app"), null, null, this.manager());
}
protected WebAppImpl wrapModel(SiteInner inner, SiteConfigResourceInner siteConfig, SiteLogsConfigInner logConfig) {
if (inner == null) {
return null;
}
return new WebAppImpl(inner.name(), inner, siteConfig, logConfig, this.manager());
}
@Override
protected WebAppImpl wrapModel(SiteInner inner) {
return wrapModel(inner, null, null);
}
@Override
public WebAppImpl define(String name) {
return wrapModel(name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
return BatchDeletionImpl.deleteByIdsAsync(ids, this::deleteInnerAsync);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
if (ids != null && !ids.isEmpty()) {
this.deleteByIdsAsync(ids).blockLast();
}
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIds(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public PagedIterable<WebAppBasic> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(this.listByResourceGroupAsync(resourceGroupName));
}
@Override
public PagedFlux<WebAppBasic> listByResourceGroupAsync(String resourceGroupName) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null.")));
}
return PagedConverter.flatMapPage(inner().listByResourceGroupAsync(resourceGroupName),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
@Override
public PagedIterable<WebAppBasic> list() {
return new PagedIterable<>(this.listAsync());
}
@Override
public PagedFlux<WebAppBasic> listAsync() {
return PagedConverter.flatMapPage(inner().listAsync(),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
} | class WebAppsImpl
extends GroupableResourcesImpl<WebApp, WebAppImpl, SiteInner, WebAppsClient, AppServiceManager>
implements WebApps, SupportsBatchDeletion {
public WebAppsImpl(final AppServiceManager manager) {
super(manager.serviceClient().getWebApps(), manager);
}
@Override
public Mono<WebApp> getByResourceGroupAsync(final String resourceGroupName, final String name) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null."));
}
if (CoreUtils.isNullOrEmpty(name)) {
return Mono.error(
new IllegalArgumentException("Parameter 'name' is required and cannot be null."));
}
return this
.getInnerAsync(resourceGroupName, name)
.flatMap(
siteInner ->
Mono
.zip(
this.inner().getConfigurationAsync(resourceGroupName, name),
this.inner().getDiagnosticLogsConfigurationAsync(resourceGroupName, name),
(SiteConfigResourceInner siteConfigResourceInner, SiteLogsConfigInner logsConfigInner) ->
wrapModel(siteInner, siteConfigResourceInner, logsConfigInner)));
}
@Override
protected Mono<SiteInner> getInnerAsync(String resourceGroupName, String name) {
return this.inner().getByResourceGroupAsync(resourceGroupName, name);
}
@Override
protected Mono<Void> deleteInnerAsync(String resourceGroupName, String name) {
return inner().deleteAsync(resourceGroupName, name).then();
}
@Override
protected WebAppImpl wrapModel(String name) {
return new WebAppImpl(name, new SiteInner().withKind("app"), null, null, this.manager());
}
protected WebAppImpl wrapModel(SiteInner inner, SiteConfigResourceInner siteConfig, SiteLogsConfigInner logConfig) {
if (inner == null) {
return null;
}
return new WebAppImpl(inner.name(), inner, siteConfig, logConfig, this.manager());
}
@Override
protected WebAppImpl wrapModel(SiteInner inner) {
return wrapModel(inner, null, null);
}
@Override
public WebAppImpl define(String name) {
return wrapModel(name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
return BatchDeletionImpl.deleteByIdsAsync(ids, this::deleteInnerAsync);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
if (ids != null && !ids.isEmpty()) {
this.deleteByIdsAsync(ids).blockLast();
}
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIds(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public PagedIterable<WebAppBasic> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(this.listByResourceGroupAsync(resourceGroupName));
}
@Override
public PagedFlux<WebAppBasic> listByResourceGroupAsync(String resourceGroupName) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null.")));
}
return PagedConverter.flatMapPage(inner().listByResourceGroupAsync(resourceGroupName),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
@Override
public PagedIterable<WebAppBasic> list() {
return new PagedIterable<>(this.listAsync());
}
@Override
public PagedFlux<WebAppBasic> listAsync() {
return PagedConverter.flatMapPage(inner().listAsync(),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
} |
OK, it's fine for now. | private static boolean isWebApp(SiteInner inner) {
boolean ret = false;
if (inner.kind() == null) {
ret = true;
} else {
List<String> kinds = Arrays.asList(inner.kind().split(Pattern.quote(",")));
if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) {
ret = true;
}
}
return ret;
} | if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) { | private static boolean isWebApp(SiteInner inner) {
boolean ret = false;
if (inner.kind() == null) {
ret = true;
} else {
List<String> kinds = Arrays.asList(inner.kind().split(Pattern.quote(",")));
if ((kinds.contains("app") || kinds.contains("api")) && !kinds.contains("kubernetes")) {
ret = true;
}
}
return ret;
} | class WebAppsImpl
extends GroupableResourcesImpl<WebApp, WebAppImpl, SiteInner, WebAppsClient, AppServiceManager>
implements WebApps, SupportsBatchDeletion {
public WebAppsImpl(final AppServiceManager manager) {
super(manager.serviceClient().getWebApps(), manager);
}
@Override
public Mono<WebApp> getByResourceGroupAsync(final String resourceGroupName, final String name) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null."));
}
if (CoreUtils.isNullOrEmpty(name)) {
return Mono.error(
new IllegalArgumentException("Parameter 'name' is required and cannot be null."));
}
return this
.getInnerAsync(resourceGroupName, name)
.flatMap(
siteInner ->
Mono
.zip(
this.inner().getConfigurationAsync(resourceGroupName, name),
this.inner().getDiagnosticLogsConfigurationAsync(resourceGroupName, name),
(SiteConfigResourceInner siteConfigResourceInner, SiteLogsConfigInner logsConfigInner) ->
wrapModel(siteInner, siteConfigResourceInner, logsConfigInner)));
}
@Override
protected Mono<SiteInner> getInnerAsync(String resourceGroupName, String name) {
return this.inner().getByResourceGroupAsync(resourceGroupName, name);
}
@Override
protected Mono<Void> deleteInnerAsync(String resourceGroupName, String name) {
return inner().deleteAsync(resourceGroupName, name).then();
}
@Override
protected WebAppImpl wrapModel(String name) {
return new WebAppImpl(name, new SiteInner().withKind("app"), null, null, this.manager());
}
protected WebAppImpl wrapModel(SiteInner inner, SiteConfigResourceInner siteConfig, SiteLogsConfigInner logConfig) {
if (inner == null) {
return null;
}
return new WebAppImpl(inner.name(), inner, siteConfig, logConfig, this.manager());
}
@Override
protected WebAppImpl wrapModel(SiteInner inner) {
return wrapModel(inner, null, null);
}
@Override
public WebAppImpl define(String name) {
return wrapModel(name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
return BatchDeletionImpl.deleteByIdsAsync(ids, this::deleteInnerAsync);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
if (ids != null && !ids.isEmpty()) {
this.deleteByIdsAsync(ids).blockLast();
}
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIds(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public PagedIterable<WebAppBasic> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(this.listByResourceGroupAsync(resourceGroupName));
}
@Override
public PagedFlux<WebAppBasic> listByResourceGroupAsync(String resourceGroupName) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null.")));
}
return PagedConverter.flatMapPage(inner().listByResourceGroupAsync(resourceGroupName),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
@Override
public PagedIterable<WebAppBasic> list() {
return new PagedIterable<>(this.listAsync());
}
@Override
public PagedFlux<WebAppBasic> listAsync() {
return PagedConverter.flatMapPage(inner().listAsync(),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
} | class WebAppsImpl
extends GroupableResourcesImpl<WebApp, WebAppImpl, SiteInner, WebAppsClient, AppServiceManager>
implements WebApps, SupportsBatchDeletion {
public WebAppsImpl(final AppServiceManager manager) {
super(manager.serviceClient().getWebApps(), manager);
}
@Override
public Mono<WebApp> getByResourceGroupAsync(final String resourceGroupName, final String name) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null."));
}
if (CoreUtils.isNullOrEmpty(name)) {
return Mono.error(
new IllegalArgumentException("Parameter 'name' is required and cannot be null."));
}
return this
.getInnerAsync(resourceGroupName, name)
.flatMap(
siteInner ->
Mono
.zip(
this.inner().getConfigurationAsync(resourceGroupName, name),
this.inner().getDiagnosticLogsConfigurationAsync(resourceGroupName, name),
(SiteConfigResourceInner siteConfigResourceInner, SiteLogsConfigInner logsConfigInner) ->
wrapModel(siteInner, siteConfigResourceInner, logsConfigInner)));
}
@Override
protected Mono<SiteInner> getInnerAsync(String resourceGroupName, String name) {
return this.inner().getByResourceGroupAsync(resourceGroupName, name);
}
@Override
protected Mono<Void> deleteInnerAsync(String resourceGroupName, String name) {
return inner().deleteAsync(resourceGroupName, name).then();
}
@Override
protected WebAppImpl wrapModel(String name) {
return new WebAppImpl(name, new SiteInner().withKind("app"), null, null, this.manager());
}
protected WebAppImpl wrapModel(SiteInner inner, SiteConfigResourceInner siteConfig, SiteLogsConfigInner logConfig) {
if (inner == null) {
return null;
}
return new WebAppImpl(inner.name(), inner, siteConfig, logConfig, this.manager());
}
@Override
protected WebAppImpl wrapModel(SiteInner inner) {
return wrapModel(inner, null, null);
}
@Override
public WebAppImpl define(String name) {
return wrapModel(name);
}
@Override
public Flux<String> deleteByIdsAsync(Collection<String> ids) {
return BatchDeletionImpl.deleteByIdsAsync(ids, this::deleteInnerAsync);
}
@Override
public Flux<String> deleteByIdsAsync(String... ids) {
return this.deleteByIdsAsync(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
if (ids != null && !ids.isEmpty()) {
this.deleteByIdsAsync(ids).blockLast();
}
}
@Override
public void deleteByIds(String... ids) {
this.deleteByIds(new ArrayList<>(Arrays.asList(ids)));
}
@Override
public PagedIterable<WebAppBasic> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(this.listByResourceGroupAsync(resourceGroupName));
}
@Override
public PagedFlux<WebAppBasic> listByResourceGroupAsync(String resourceGroupName) {
if (CoreUtils.isNullOrEmpty(resourceGroupName)) {
return new PagedFlux<>(() -> Mono.error(
new IllegalArgumentException("Parameter 'resourceGroupName' is required and cannot be null.")));
}
return PagedConverter.flatMapPage(inner().listByResourceGroupAsync(resourceGroupName),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
@Override
public PagedIterable<WebAppBasic> list() {
return new PagedIterable<>(this.listAsync());
}
@Override
public PagedFlux<WebAppBasic> listAsync() {
return PagedConverter.flatMapPage(inner().listAsync(),
inner -> isWebApp(inner) ? Mono.just(new WebAppBasicImpl(inner, this.manager())) : Mono.empty());
}
} |
Is it intentional that the collections ("colls") path segment is returned for PartitionKey? | private static String getResourceSegment(ResourceType resourceType) {
switch (resourceType) {
case Attachment:
return Paths.ATTACHMENTS_PATH_SEGMENT;
case Database:
return Paths.DATABASES_PATH_SEGMENT;
case Conflict:
return Paths.CONFLICTS_PATH_SEGMENT;
case Document:
return Paths.DOCUMENTS_PATH_SEGMENT;
case DocumentCollection:
case PartitionKey:
return Paths.COLLECTIONS_PATH_SEGMENT;
case Offer:
return Paths.OFFERS_PATH_SEGMENT;
case Permission:
return Paths.PERMISSIONS_PATH_SEGMENT;
case StoredProcedure:
return Paths.STORED_PROCEDURES_PATH_SEGMENT;
case Trigger:
return Paths.TRIGGERS_PATH_SEGMENT;
case UserDefinedFunction:
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
case User:
return Paths.USERS_PATH_SEGMENT;
case PartitionKeyRange:
return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
case Media:
return Paths.MEDIA_PATH_SEGMENT;
case DatabaseAccount:
return "";
case ClientTelemetry:
return "";
case ClientEncryptionKey:
return Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
default:
return null;
}
} | case PartitionKey: | private static String getResourceSegment(ResourceType resourceType) {
switch (resourceType) {
case Attachment:
return Paths.ATTACHMENTS_PATH_SEGMENT;
case Database:
return Paths.DATABASES_PATH_SEGMENT;
case Conflict:
return Paths.CONFLICTS_PATH_SEGMENT;
case Document:
return Paths.DOCUMENTS_PATH_SEGMENT;
case DocumentCollection:
case PartitionKey:
return Paths.COLLECTIONS_PATH_SEGMENT;
case Offer:
return Paths.OFFERS_PATH_SEGMENT;
case Permission:
return Paths.PERMISSIONS_PATH_SEGMENT;
case StoredProcedure:
return Paths.STORED_PROCEDURES_PATH_SEGMENT;
case Trigger:
return Paths.TRIGGERS_PATH_SEGMENT;
case UserDefinedFunction:
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
case User:
return Paths.USERS_PATH_SEGMENT;
case PartitionKeyRange:
return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
case Media:
return Paths.MEDIA_PATH_SEGMENT;
case DatabaseAccount:
return "";
case ClientTelemetry:
return "";
case ClientEncryptionKey:
return Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
default:
return null;
}
} | class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider {
private static final String AUTH_PREFIX = "type=master&ver=1.0&sig=";
private final AzureKeyCredential credential;
private volatile String currentCredentialKey;
private volatile MacPool macPool;
private final Lock macInstanceLock = new ReentrantLock();
/**
 * Creates the token provider and eagerly initializes the HMAC pool for the
 * current credential key (see reInitializeIfPossible).
 *
 * @param credential the account key credential used to sign requests.
 */
public BaseAuthorizationTokenProvider(AzureKeyCredential credential) {
    this.credential = credential;
    reInitializeIfPossible();
}
/**
 * This API is a helper method to create an auth header based on the client
 * request, using the master key.
 *
 * @param verb the verb.
 * @param resourceIdOrFullName the resource id or full name.
 * @param resourceType the resource type; translated to its REST path segment.
 * @param headers the request headers.
 * @return the key authorization signature.
 */
public String generateKeyAuthorizationSignature(RequestVerb verb,
                                                String resourceIdOrFullName,
                                                ResourceType resourceType,
                                                Map<String, String> headers) {
    // Translate the resource type to its path segment (e.g. Document -> "docs");
    // unmapped types yield null, which the String overload rejects.
    return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName,
        BaseAuthorizationTokenProvider.getResourceSegment(resourceType), headers);
}
/**
 * This API is a helper method to create an auth header based on the client
 * request, using the master key.
 *
 * @param verb the verb.
 * @param resourceIdOrFullName the resource id or full name; null is treated as "".
 * @param resourceSegment the resource segment; must be non-null.
 * @param headers the request headers; must be non-null.
 * @return the key authorization signature ("type=master&ver=1.0&sig=...").
 * @throws IllegalArgumentException for null verb/resourceSegment/headers or an
 *     empty credential key.
 */
public String generateKeyAuthorizationSignature(RequestVerb verb,
                                                String resourceIdOrFullName,
                                                String resourceSegment,
                                                Map<String, String> headers) {
    if (verb == null) {
        throw new IllegalArgumentException("verb");
    }
    if (resourceIdOrFullName == null) {
        resourceIdOrFullName = "";
    }
    if (resourceSegment == null) {
        throw new IllegalArgumentException("resourceSegment");
    }
    if (headers == null) {
        throw new IllegalArgumentException("headers");
    }
    if (StringUtils.isEmpty(this.credential.getKey())) {
        throw new IllegalArgumentException("key credentials cannot be empty");
    }
    // Only non-name-based (RID) links are lowercased; name-based links keep
    // their original casing.
    if (!PathsHelper.isNameBased(resourceIdOrFullName)) {
        resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT);
    }
    // String-to-sign: verb, resource segment, resource link, x-date, http-date,
    // each terminated by '\n'. Order and exact bytes matter for the signature.
    StringBuilder body = new StringBuilder();
    body.append(ModelBridgeInternal.toLower(verb))
        .append('\n')
        .append(resourceSegment)
        .append('\n')
        .append(resourceIdOrFullName)
        .append('\n');
    if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) {
        body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase(Locale.ROOT));
    }
    body.append('\n');
    if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) {
        body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase(Locale.ROOT));
    }
    body.append('\n');
    // Borrow a Mac from the pool; the finally block always returns it.
    MacPool.ReUsableMac macInstance = getReUseableMacInstance();
    try {
        byte[] digest = macInstance.get().doFinal(body.toString().getBytes(StandardCharsets.UTF_8));
        String auth = Utils.encodeBase64String(digest);
        return AUTH_PREFIX + auth;
    }
    finally {
        macInstance.close();
    }
}
/**
 * This API is a helper method to create an auth header based on the client
 * request, using resource tokens.
 *
 * @param resourceTokens the resource tokens; must be non-null.
 * @param path the request path.
 * @param resourceId the resource id.
 * @return the authorization token, or {@code null} when no token matches.
 * @throws IllegalArgumentException when {@code resourceTokens} is null.
 */
public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens,
                                                       String path,
                                                       String resourceId) {
    if (resourceTokens == null) {
        throw new IllegalArgumentException("resourceTokens");
    }
    String resourceToken = null;
    if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) {
        // An exact token for this resource id takes precedence.
        resourceToken = resourceTokens.get(resourceId);
    } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) {
        // No addressable resource: fall back to any available token.
        if (!resourceTokens.isEmpty()) {
            resourceToken = resourceTokens.values().iterator().next();
        }
    } else {
        // Scan the path's name segments (skipping well-known type segments)
        // for a segment that has a token registered.
        String[] pathParts = StringUtils.split(path, "/");
        HashSet<String> resourceTypesSet = new HashSet<>(Arrays.asList(
            "dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions",
            "attachments", "media", "conflicts"));
        // NOTE(review): the loop does not break on a hit, so the matching
        // segment nearest the path root wins — confirm that is intended.
        for (int i = pathParts.length - 1; i >= 0; --i) {
            if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) {
                resourceToken = resourceTokens.get(pathParts[i]);
            }
        }
    }
    return resourceToken;
}
/**
 * Borrows a reusable Mac from the pool, first refreshing the pool if the
 * credential key has changed.
 *
 * @return a pooled Mac wrapper; callers must close() it to return it to the pool.
 */
private MacPool.ReUsableMac getReUseableMacInstance() {
    reInitializeIfPossible();
    return macPool.take();
}
/*
 * Ensures the MAC pool is initialized for the current credential key.
 * In case of a credential change, optimistically tries to refresh the pool.
 *
 * Non-blocking: only the thread that wins tryLock performs the refresh;
 * other threads keep using the existing (volatile) pool until it is swapped.
 *
 * NOTE: the constructor calls this, so the default pool is always initialized.
 */
private void reInitializeIfPossible() {
    // Reference (!=) comparison is a cheap fast-path check; this appears
    // intentional on the assumption that a credential update installs a new
    // String instance — TODO(review): confirm identity comparison is the contract.
    if (this.currentCredentialKey != this.credential.getKey()) {
        boolean lockAcquired = this.macInstanceLock.tryLock();
        if (lockAcquired) {
            try {
                // Double-check under the lock before rebuilding the pool.
                if (this.currentCredentialKey != this.credential.getKey()) {
                    byte[] masterKeyBytes = this.credential.getKey().getBytes(StandardCharsets.UTF_8);
                    byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes);
                    SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256");
                    try {
                        Mac macInstance = Mac.getInstance("HMACSHA256");
                        macInstance.init(signingKey);
                        this.currentCredentialKey = this.credential.getKey();
                        this.macPool = new MacPool(macInstance);
                    } catch (NoSuchAlgorithmException | InvalidKeyException e) {
                        // HMACSHA256 is a required JCE algorithm; a bad key is unrecoverable.
                        throw new IllegalStateException(e);
                    }
                }
            } finally {
                this.macInstanceLock.unlock();
            }
        }
    }
}
} | class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider {
private static final String AUTH_PREFIX = "type=master&ver=1.0&sig=";
private final AzureKeyCredential credential;
private volatile String currentCredentialKey;
private volatile MacPool macPool;
private final Lock macInstanceLock = new ReentrantLock();
public BaseAuthorizationTokenProvider(AzureKeyCredential credential) {
this.credential = credential;
reInitializeIfPossible();
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb.
* @param resourceIdOrFullName the resource id or full name
* @param resourceType the resource type.
* @param headers the request headers.
* @return the key authorization signature.
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
ResourceType resourceType,
Map<String, String> headers) {
return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName,
BaseAuthorizationTokenProvider.getResourceSegment(resourceType), headers);
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb
* @param resourceIdOrFullName the resource id or full name
* @param resourceSegment the resource segment
* @param headers the request headers
* @return the key authorization signature
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
String resourceSegment,
Map<String, String> headers) {
if (verb == null) {
throw new IllegalArgumentException("verb");
}
if (resourceIdOrFullName == null) {
resourceIdOrFullName = "";
}
if (resourceSegment == null) {
throw new IllegalArgumentException("resourceSegment");
}
if (headers == null) {
throw new IllegalArgumentException("headers");
}
if (StringUtils.isEmpty(this.credential.getKey())) {
throw new IllegalArgumentException("key credentials cannot be empty");
}
if(!PathsHelper.isNameBased(resourceIdOrFullName)) {
resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT);
}
StringBuilder body = new StringBuilder();
body.append(ModelBridgeInternal.toLower(verb))
.append('\n')
.append(resourceSegment)
.append('\n')
.append(resourceIdOrFullName)
.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
MacPool.ReUsableMac macInstance = getReUseableMacInstance();
try {
byte[] digest = macInstance.get().doFinal(body.toString().getBytes(StandardCharsets.UTF_8));
String auth = Utils.encodeBase64String(digest);
return AUTH_PREFIX + auth;
}
finally {
macInstance.close();
}
}
/**
 * Resolves the resource token to use for a request from the supplied token map.
 * <p>
 * Lookup order: an exact (non-null) match on {@code resourceId}; otherwise, when the
 * path or the id is missing, any available token; otherwise the token of the
 * outermost named path segment that has an entry in the map.
 *
 * @param resourceTokens the resource tokens.
 * @param path the path.
 * @param resourceId the resource id.
 * @return the authorization token, or {@code null} when none matches.
 */
public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens,
                                                       String path,
                                                       String resourceId) {
    if (resourceTokens == null) {
        throw new IllegalArgumentException("resourceTokens");
    }

    // Fast path: a token registered directly under the resource id.
    String matchedToken = resourceTokens.get(resourceId);
    if (matchedToken != null) {
        return matchedToken;
    }

    // Without enough addressing information, fall back to any token we have.
    if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) {
        if (resourceTokens.size() > 0) {
            return resourceTokens.values().iterator().next();
        }
        return null;
    }

    // Walk the path segments from the deepest to the outermost, skipping the well-known
    // resource-type segments. The scan deliberately runs all the way to index 0, so the
    // token of the OUTERMOST matching segment wins (NOTE(review): confirm this is
    // intended — a break here would make the deepest match win instead).
    String[] pathParts = StringUtils.split(path, "/");
    HashSet<String> knownResourceTypes = new HashSet<String>();
    Collections.addAll(knownResourceTypes,
        "dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions",
        "attachments", "media", "conflicts");
    String resourceToken = null;
    for (int i = pathParts.length - 1; i >= 0; --i) {
        if (!knownResourceTypes.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) {
            resourceToken = resourceTokens.get(pathParts[i]);
        }
    }
    return resourceToken;
}
// Hands out a MAC from the pool, first refreshing the pool if the key credential changed.
private MacPool.ReUsableMac getReUseableMacInstance() {
    // Opportunistically rebuild the pool when the credential was rotated.
    reInitializeIfPossible();
    return this.macPool.take();
}
/*
* Ensures that this.macInstance is initialized.
* In case of a credential change, it optimistically tries to refresh the macInstance.
*
* The implementation is non-blocking: whichever caller acquires the lock will try to
* refresh with the new credentials.
*
* NOTE: Calling this from the CTOR ensures that the default is initialized.
*/
private void reInitializeIfPossible() {
// Intentional reference (!=) comparison: the key string is replaced wholesale on
// rotation, so an identity check is a cheap fast-path filter. A false positive only
// causes a harmless extra refresh. NOTE(review): this relies on callers replacing,
// never mutating, the key string — confirm.
if (this.currentCredentialKey != this.credential.getKey()) {
// tryLock keeps this non-blocking: losers keep using the old pool until the winner
// finishes the refresh.
boolean lockAcquired = this.macInstanceLock.tryLock();
if (lockAcquired) {
try {
// Re-check under the lock (double-checked refresh): another thread may have
// refreshed while we were acquiring.
if (this.currentCredentialKey != this.credential.getKey()) {
byte[] masterKeyBytes = this.credential.getKey().getBytes(StandardCharsets.UTF_8);
byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes);
SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256");
try {
Mac macInstance = Mac.getInstance("HMACSHA256");
macInstance.init(signingKey);
// Publish the key marker and the new pool; order is key-then-pool as written.
this.currentCredentialKey = this.credential.getKey();
this.macPool = new MacPool(macInstance);
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
// HMACSHA256 is a mandatory JCE algorithm; failure here is unrecoverable.
throw new IllegalStateException(e);
}
}
} finally {
this.macInstanceLock.unlock();
}
}
}
}
} |
I would keep the isFeed condition first, as it has separate logic from the rest and is easier to read. | private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
// A feed read needs an owner full name, except for the database feed.
if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
throw new IllegalArgumentException(errorMessage);
}
String resourcePath = null;
// Partition-key delete addresses an operations sub-path regardless of isFeed.
if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
// Point operations address the resource directly by its full name.
} else if (!isFeed) {
resourcePath = resourceFullName;
} else if (resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT;
// Feed reads append the child-resource segment to the owner's full name.
} else if (resourceType == ResourceType.DocumentCollection) {
resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.StoredProcedure) {
resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
} else if (resourceType == ResourceType.UserDefinedFunction) {
resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Trigger) {
resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Conflict) {
resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Attachment) {
resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.User) {
resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Permission) {
resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Document) {
resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Offer) {
return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.PartitionKeyRange) {
return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
} else if (resourceType == ResourceType.Schema) {
resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
} else if (resourceType == ResourceType.ClientEncryptionKey) {
resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
} else {
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
assert false : errorMessage;
throw new IllegalArgumentException(errorMessage);
}
return resourcePath;
} | } else if (!isFeed) { | private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
// A feed read needs an owner full name, except for the database feed.
if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
throw new IllegalArgumentException(errorMessage);
}
String resourcePath = null;
// Partition-key delete addresses an operations sub-path regardless of isFeed.
if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
// Point operations address the resource directly by its full name.
} else if (!isFeed) {
resourcePath = resourceFullName;
} else if (resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT;
// Feed reads append the child-resource segment to the owner's full name.
} else if (resourceType == ResourceType.DocumentCollection) {
resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.StoredProcedure) {
resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
} else if (resourceType == ResourceType.UserDefinedFunction) {
resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Trigger) {
resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Conflict) {
resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Attachment) {
resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.User) {
resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Permission) {
resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Document) {
resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Offer) {
return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.PartitionKeyRange) {
return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
} else if (resourceType == ResourceType.Schema) {
resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
} else if (resourceType == ResourceType.ClientEncryptionKey) {
resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
} else {
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
assert false : errorMessage;
throw new IllegalArgumentException(errorMessage);
}
return resourcePath;
} | class PathsHelper {
private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);
/**
 * Builds the request path for the given request, dispatching on whether the request
 * addresses its resource by name (full address) or by resource id.
 */
public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
    return request.getIsNameBased()
        ? generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType())
        : generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
}
// Builds "ownerFullName/<childSegment>/<resourceName>" for the concrete Resource
// subtype; the resourceType parameter is an instance used only for type dispatch.
public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
if (resourceName == null)
return null;
// Databases are top-level: no owner prefix.
if (resourceType instanceof Database) {
return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
} else if (resourceOwnerFullName == null) {
// All remaining types require an owner.
return null;
} else if (resourceType instanceof DocumentCollection) {
return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof StoredProcedure) {
return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof UserDefinedFunction) {
return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof Trigger) {
return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof Conflict) {
return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof User) {
return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof Permission) {
return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof Document) {
return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof Offer) {
return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
} else if (resourceType instanceof Resource) {
// Catch-all: any other Resource subtype is unsupported, so the throw below is
// effectively unreachable for non-null inputs.
return null;
}
// NOTE(review): reached only when resourceType is null, in which case
// resourceType.toString() throws NPE before the IllegalArgumentException —
// confirm callers never pass null.
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
assert false : errorMessage;
throw new IllegalArgumentException(errorMessage);
}
/**
 * Builds the RID-based request path for the given resource type by parsing the owner
 * resource id into its database/collection/... components. Feed requests return the
 * child-collection path; point requests append the concrete resource id.
 *
 * @param resourceType the resource type being addressed.
 * @param ownerOrResourceId the resource id (point) or owner id (feed); may be null for
 *        the types exempted below.
 * @param isFeed whether this is a feed (collection-level) request.
 * @param operationType the operation; only consulted for partition-key delete.
 * @return the request path.
 */
public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
// Feeds generally require an owner id; the listed types are rooted and exempt.
if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) &&
resourceType != ResourceType.Database &&
resourceType != ResourceType.Offer &&
resourceType != ResourceType.MasterPartition &&
resourceType != ResourceType.ServerPartition &&
resourceType != ResourceType.DatabaseAccount &&
resourceType != ResourceType.Topology) {
throw new IllegalStateException("INVALID resource type");
}
if(ownerOrResourceId == null) {
ownerOrResourceId = StringUtils.EMPTY;
}
// Each pair below handles (feed, point) for one resource type, from the top of the
// hierarchy downwards. ResourceId.parse decomposes the RID into its ancestor ids.
if (isFeed && resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT;
} else if (resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.DocumentCollection) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.DocumentCollection) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
} else if (isFeed && resourceType == ResourceType.Offer) {
return Paths.OFFERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Offer) {
return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.StoredProcedure) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.STORED_PROCEDURES_PATH_SEGMENT;
} else if (resourceType == ResourceType.StoredProcedure) {
ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" +
Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
} else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.UserDefinedFunction) {
ResourceId functionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" +
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
} else if (isFeed && resourceType == ResourceType.Trigger) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.TRIGGERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Trigger) {
ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" +
Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
} else if (isFeed && resourceType == ResourceType.Conflict) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.CONFLICTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Conflict) {
ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" +
Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
} else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" +
documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
} else if (resourceType == ResourceType.PartitionKeyRange) {
ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" +
Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
} else if (isFeed && resourceType == ResourceType.Attachment) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" +
Paths.ATTACHMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Attachment) {
ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" +
Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
} else if (isFeed && resourceType == ResourceType.User) {
return
Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" +
Paths.USERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.User) {
ResourceId userId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
} else if (isFeed && resourceType == ResourceType.Permission) {
ResourceId userId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" +
Paths.PERMISSIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Permission) {
ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" +
Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" +
Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
} else if (isFeed && resourceType == ResourceType.Document) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Document) {
ResourceId documentId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
} else if (isFeed && resourceType == ResourceType.MasterPartition) {
return Paths.PARTITIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.MasterPartition) {
return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.ServerPartition) {
return Paths.PARTITIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.ServerPartition) {
return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.Topology) {
return Paths.TOPOLOGY_PATH_SEGMENT;
} else if (resourceType == ResourceType.Topology) {
return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
} else if (resourceType == ResourceType.DatabaseAccount) {
return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
// NOTE(review): no isFeed branch exists for ClientEncryptionKey — a feed request
// would receive the point-resource path below; confirm this is intended.
} else if (resourceType == ResourceType.ClientEncryptionKey) {
ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" +
Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
} else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
}
String errorMessage = "invalid resource type";
throw new IllegalStateException(errorMessage);
}
/**
 * Parses a resource url into a {@link PathInfo}, first detecting whether the address
 * is name-based (second segment is not a valid database RID) and delegating to
 * {@link #parseNameSegments} in that case; otherwise classifies it as a RID-based
 * feed (odd segment count) or point resource.
 *
 * @param resourceUrl the resource url to parse. NOTE(review): a null url NPEs on the
 *        strip().split() chain below — confirm callers never pass null.
 * @return the parsed path info, or {@code null} when the url is not recognized.
 */
public static PathInfo parsePathSegments(String resourceUrl) {
// String.split never returns null, so the null half of the next check is defensive.
String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
if (segments == null || segments.length < 1) {
return null;
}
int uriSegmentsCount = segments.length;
String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/");
String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/")
: StringUtils.EMPTY;
if (uriSegmentsCount >= 2) {
// Rooted segments (media/offers/partitions/dbaccount) are always RID-addressed;
// for the rest, a non-database-RID second segment means the url is name-based.
if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) {
Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
if (!result.getLeft() || !result.getRight().isDatabaseId()) {
return parseNameSegments(resourceUrl, segments);
}
}
}
// Odd segment count ending in a type segment => feed; the owner is the segment
// before it (empty for the database feed).
if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) {
return new PathInfo(true, segmentOne,
segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY,
false);
} else if (isResourceType(segmentTwo)) {
return new PathInfo(false, segmentTwo, segmentOne, false);
}
return null;
}
/**
 * Method which will return boolean based on whether it is able to parse the
 * path and name segment from resource url , and fill info in PathInfo object
 * @param resourceUrl Complete ResourceLink
 * @param pathInfo Path info object which will hold information
 * @param clientVersion The Client version
 * @return {@code true} when the url was parsed and {@code pathInfo} populated;
 *         {@code false} otherwise (pathInfo is reset to empty values either way).
 */
public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
// Reset the output holder before parsing.
pathInfo.resourcePath = StringUtils.EMPTY;
pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
pathInfo.isFeed = false;
pathInfo.isNameBased = false;
if (StringUtils.isEmpty(resourceUrl)) {
return false;
}
String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
if (segments == null || segments.length < 1) {
return false;
}
int uriSegmentsCount = segments.length;
String segmentOne = segments[uriSegmentsCount - 1];
String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
if (uriSegmentsCount >= 2) {
// Rooted segments are always RID-addressed; otherwise a second segment that is
// not a valid database RID means the url is name-based.
if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
if (!result.getLeft() || !result.getRight().isDatabaseId()) {
pathInfo.isNameBased = true;
return tryParseNameSegments(resourceUrl, segments, pathInfo);
}
}
}
// Odd segment count ending in a type segment => feed; even => point resource.
if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
pathInfo.isFeed = true;
pathInfo.resourcePath = segmentOne;
if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
pathInfo.resourceIdOrFullName = segmentTwo;
}
} else if (PathsHelper.isResourceType(segmentTwo)) {
pathInfo.isFeed = false;
pathInfo.resourcePath = segmentTwo;
pathInfo.resourceIdOrFullName = segmentOne;
// NOTE: a dead, side-effect-free block that declared unused locals
// (attachmentId/storeIndex) for media links was removed here; the .NET SDK parses
// the attachment id / store index at this point, which was never ported.
} else {
return false;
}
return true;
}
/**
 * Method which will return boolean based on whether it is able to parse the
 * name segment from resource url , and fill info in PathInfo object
 * @param resourceUrl Complete ResourceLink
 * @param segments the url already split into its path segments
 * @param pathInfo Path info object which will hold information
 * @return {@code true} when the name-based url was recognized and {@code pathInfo}
 *         populated; {@code false} otherwise.
 */
private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
pathInfo.isFeed = false;
pathInfo.resourceIdOrFullName = "";
pathInfo.resourcePath = "";
if (segments == null || segments.length < 1) {
return false;
}
if (segments.length % 2 == 0) {
// Even count: ".../<type>/<name>" — a point resource whose full name is the url.
if (isResourceType(segments[segments.length - 2])) {
pathInfo.resourcePath = segments[segments.length - 2];
pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
return true;
}
} else {
// Odd count: ".../<type>" — a feed; the owner is the url minus its last segment.
if (isResourceType(segments[segments.length - 1])) {
pathInfo.isFeed = true;
pathInfo.resourcePath = segments[segments.length - 1];
String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT));
pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName);
return true;
}
}
return false;
}
/**
 * Derives a {@link PathInfo} from the segments of a name-based resource url.
 * An even segment count denotes a point resource, an odd count a feed.
 *
 * @param resourceUrl the full resource url.
 * @param segments the url already split into its path segments.
 * @return the parsed path info, or {@code null} when the url is not recognized.
 */
public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
    if (segments == null || segments.length < 1) {
        return null;
    }
    if (segments.length % 2 == 0) {
        // ".../<type>/<name>" — a point resource; the whole url is its full name.
        String typeSegment = segments[segments.length - 2];
        if (isResourceType(typeSegment)) {
            return new PathInfo(false, typeSegment, unescapeJavaAndTrim(resourceUrl), true);
        }
    } else {
        // ".../<type>" — a feed; the owner full name is the url minus its last segment.
        String typeSegment = segments[segments.length - 1];
        if (isResourceType(typeSegment)) {
            String ownerFullName = resourceUrl.substring(0,
                StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
            return new PathInfo(true, typeSegment, unescapeJavaAndTrim(ownerFullName), true);
        }
    }
    return null;
}
/**
 * Strips leading/trailing path separators from a resource url and, when the url
 * contains a Java escape character, unescapes it; otherwise returns the trimmed url
 * (or the original instance when nothing needed trimming).
 *
 * @param resourceUrl the url to trim; may be null.
 * @return the trimmed (and possibly unescaped) url, or null for null input.
 */
public static String unescapeJavaAndTrim(String resourceUrl) {
if (resourceUrl == null) {
return null;
}
// Advance past leading separator characters.
int startInclusiveIndex = 0;
while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
startInclusiveIndex++;
}
if (startInclusiveIndex == resourceUrl.length()) {
return "";
}
// Back up over trailing separator characters.
int endExclusiveIndex = resourceUrl.length();
while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
endExclusiveIndex--;
}
// Only pay for unescaping when an escape character is actually present.
for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) {
return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
}
}
// Avoid allocating a substring when no trimming occurred.
if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
return resourceUrl;
}
return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
}
/**
 * Returns whether the given path segment is one of the well-known resource-type
 * segments ("dbs", "colls", "docs", ...). Comparison is case-insensitive via a
 * locale-independent lower-casing of the input.
 */
private static boolean isResourceType(String resourcePathSegment) {
if (StringUtils.isEmpty(resourcePathSegment)) {
return false;
}
switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
case Paths.ATTACHMENTS_PATH_SEGMENT:
case Paths.COLLECTIONS_PATH_SEGMENT:
case Paths.DATABASES_PATH_SEGMENT:
case Paths.PERMISSIONS_PATH_SEGMENT:
case Paths.USERS_PATH_SEGMENT:
case Paths.DOCUMENTS_PATH_SEGMENT:
case Paths.STORED_PROCEDURES_PATH_SEGMENT:
case Paths.TRIGGERS_PATH_SEGMENT:
case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
case Paths.CONFLICTS_PATH_SEGMENT:
case Paths.MEDIA_PATH_SEGMENT:
case Paths.OFFERS_PATH_SEGMENT:
case Paths.PARTITIONS_PATH_SEGMENT:
case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
case Paths.TOPOLOGY_PATH_SEGMENT:
case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
case Paths.SCHEMAS_PATH_SEGMENT:
case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
return true;
default:
return false;
}
}
/**
 * Builds the name-based path for a child resource: databases are rooted under
 * "dbs/{name}", every other supported type is "{ownerFullName}/{typeSegment}/{name}".
 *
 * @return the name-based path, or {@code null} for unsupported resource types.
 */
public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
    // Databases have no owner prefix.
    if (resourceType == ResourceType.Database) {
        return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
    }
    String childSegment;
    switch (resourceType) {
        case DocumentCollection:
            childSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Trigger:
            childSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case Attachment:
            childSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case Conflict:
            childSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        case Document:
            childSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case Offer:
            childSegment = Paths.OFFERS_PATH_SEGMENT;
            break;
        case Permission:
            childSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case User:
            childSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case PartitionKeyRange:
            childSegment = Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            break;
        default:
            return null;
    }
    return resourceOwnerFullName + "/" + childSegment + "/" + resourceName;
}
/**
 * Truncates a resource full name down to its collection path (the first four
 * segments, "dbs/{db}/colls/{coll}"). Null input, or input not deeper than a
 * collection, is returned unchanged.
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    int fourthSlash = indexOfNth(trimmed, '/', 4);
    // No fourth slash: the name is not deeper than a collection; hand back the original.
    return fourthSlash > 0 ? trimmed.substring(0, fourthSlash) : resourceFullName;
}
/**
 * Truncates a resource full name down to its database path (the first two segments,
 * "dbs/{db}"). Null input, or input not deeper than a database, is returned unchanged.
 */
public static String getDatabasePath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    int secondSlash = indexOfNth(resourceFullName, '/', 2);
    return secondSlash > 0 ? resourceFullName.substring(0, secondSlash) : resourceFullName;
}
/**
 * Returns the ancestor path of {@code resourceFullName} cut at the given separator
 * index. When the name has fewer separators, the whole name is returned if it reaches
 * depth {@code segmentIndex - 1}; otherwise {@code null}.
 */
public static String getParentByIndex(String resourceFullName, int segmentIndex) {
    int cutAt = indexOfNth(resourceFullName, '/', segmentIndex);
    if (cutAt > 0) {
        return resourceFullName.substring(0, cutAt);
    }
    // Shorter than requested: the name itself is the parent if one level shallower exists.
    return indexOfNth(resourceFullName, '/', segmentIndex - 1) > 0 ? resourceFullName : null;
}
/**
 * A name-based address starts with a four-character type segment followed by '/'
 * (e.g. "dbs/..."), so it has a '/' at offset 3 and more than four characters;
 * RID-based addresses do not.
 */
public static boolean isNameBased(String resourceIdOrFullName) {
    return resourceIdOrFullName != null
        && resourceIdOrFullName.length() > 4
        && resourceIdOrFullName.charAt(3) == '/';
}
/**
 * Returns the index of the nth occurrence of {@code value} in {@code str}, or -1
 * when there are fewer than n occurrences (including n <= 0).
 */
private static int indexOfNth(String str, char value, int nthOccurance) {
    int index = -1;
    for (int remaining = nthOccurance; remaining > 0; remaining--) {
        index = str.indexOf(value, index + 1);
        if (index < 0) {
            return -1;
        }
    }
    return index;
}
/**
 * Maps a path segment ("dbs", "colls", ...) to its {@link ResourceType}.
 * Comparison is case-sensitive; segments not in the mapping (e.g. media, partitions)
 * are rejected.
 *
 * @param resourcePathSegment the path segment to map.
 * @return the matching resource type.
 * @throws BadRequestException when the segment is empty or unknown.
 */
public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
if (StringUtils.isEmpty(resourcePathSegment)) {
String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
throw new BadRequestException(message);
}
switch (resourcePathSegment) {
case Paths.ATTACHMENTS_PATH_SEGMENT:
return ResourceType.Attachment;
case Paths.COLLECTIONS_PATH_SEGMENT:
return ResourceType.DocumentCollection;
case Paths.DATABASES_PATH_SEGMENT:
return ResourceType.Database;
case Paths.PERMISSIONS_PATH_SEGMENT:
return ResourceType.Permission;
case Paths.USERS_PATH_SEGMENT:
return ResourceType.User;
case Paths.DOCUMENTS_PATH_SEGMENT:
return ResourceType.Document;
case Paths.STORED_PROCEDURES_PATH_SEGMENT:
return ResourceType.StoredProcedure;
case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
return ResourceType.UserDefinedFunction;
case Paths.TRIGGERS_PATH_SEGMENT:
return ResourceType.Trigger;
case Paths.CONFLICTS_PATH_SEGMENT:
return ResourceType.Conflict;
case Paths.OFFERS_PATH_SEGMENT:
return ResourceType.Offer;
case Paths.SCHEMAS_PATH_SEGMENT:
return ResourceType.Schema;
}
String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
throw new BadRequestException(errorMessage);
}
/**
 * Maps a {@link ResourceType} to its path segment. Note the non-segment results:
 * Media maps to the media root and DatabaseAccount/Topology map to the root path.
 *
 * @param resourceType the resource type to map.
 * @return the path segment (or root) for the type.
 * @throws BadRequestException when the type has no path mapping.
 */
public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
switch (resourceType) {
case Database:
return Paths.DATABASES_PATH_SEGMENT;
case DocumentCollection:
return Paths.COLLECTIONS_PATH_SEGMENT;
case Document:
return Paths.DOCUMENTS_PATH_SEGMENT;
case StoredProcedure:
return Paths.STORED_PROCEDURES_PATH_SEGMENT;
case UserDefinedFunction:
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
case Trigger:
return Paths.TRIGGERS_PATH_SEGMENT;
case Conflict:
return Paths.CONFLICTS_PATH_SEGMENT;
case Attachment:
return Paths.ATTACHMENTS_PATH_SEGMENT;
case User:
return Paths.USERS_PATH_SEGMENT;
case Permission:
return Paths.PERMISSIONS_PATH_SEGMENT;
case Offer:
return Paths.OFFERS_PATH_SEGMENT;
case MasterPartition:
case ServerPartition:
return Paths.PARTITIONS_PATH_SEGMENT;
case PartitionKeyRange:
return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
case Media:
return Paths.MEDIA_ROOT;
case Schema:
return Paths.SCHEMAS_PATH_SEGMENT;
case DatabaseAccount:
case Topology:
return Paths.ROOT;
default:
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
throw new BadRequestException(errorMessage);
}
}
public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
String[] segments = StringUtils.split(resourceFullName, '/');
String[] resourcePathArray = getResourcePathArray(resourceType);
if (resourcePathArray == null) {
return false;
}
if (segments.length != resourcePathArray.length * 2) {
return false;
}
for (int i = 0; i < resourcePathArray.length; i++) {
if(resourcePathArray[i].compareTo(segments[2 * i]) != 0) {
return false;
}
}
return true;
}
private static String[] getResourcePathArray(ResourceType resourceType) {
List<String> segments = new ArrayList<String>();
segments.add(Paths.DATABASES_PATH_SEGMENT);
if (resourceType == ResourceType.Permission ||
resourceType == ResourceType.User) {
segments.add(Paths.USERS_PATH_SEGMENT);
if (resourceType == ResourceType.Permission) {
segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
}
} else if (resourceType == ResourceType.ClientEncryptionKey) {
segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
} else if (resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.StoredProcedure ||
resourceType == ResourceType.UserDefinedFunction ||
resourceType == ResourceType.Trigger ||
resourceType == ResourceType.Conflict ||
resourceType == ResourceType.Attachment ||
resourceType == ResourceType.Document ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.Schema) {
segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
if (resourceType == ResourceType.StoredProcedure) {
segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
} else if(resourceType == ResourceType.UserDefinedFunction) {
segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
} else if(resourceType == ResourceType.Trigger) {
segments.add(Paths.TRIGGERS_PATH_SEGMENT);
} else if (resourceType == ResourceType.Conflict) {
segments.add(Paths.CONFLICTS_PATH_SEGMENT);
} else if (resourceType == ResourceType.Schema) {
segments.add(Paths.SCHEMAS_PATH_SEGMENT);
} else if(resourceType == ResourceType.Document ||
resourceType == ResourceType.Attachment) {
segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
if (resourceType == ResourceType.Attachment) {
segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
}
} else if(resourceType == ResourceType.PartitionKeyRange) {
segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
} else if (resourceType == ResourceType.PartitionKey) {
segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
segments.add(Paths.OPERATIONS_PATH_SEGMENT);
}
} else if (resourceType != ResourceType.Database) {
return null;
}
return segments.stream().toArray(String[]::new);
}
public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
if (resourceType == ResourceType.Conflict) {
return PathsHelper.validateConflictId(resourceId);
} else if (resourceType == ResourceType.Database) {
return PathsHelper.validateDatabaseId(resourceId);
} else if (resourceType == ResourceType.DocumentCollection) {
return PathsHelper.validateDocumentCollectionId(resourceId);
} else if (resourceType == ResourceType.Document) {
return PathsHelper.validateDocumentId(resourceId);
} else if (resourceType == ResourceType.Permission) {
return PathsHelper.validatePermissionId(resourceId);
} else if (resourceType == ResourceType.StoredProcedure) {
return PathsHelper.validateStoredProcedureId(resourceId);
} else if (resourceType == ResourceType.Trigger) {
return PathsHelper.validateTriggerId(resourceId);
} else if (resourceType == ResourceType.UserDefinedFunction) {
return PathsHelper.validateUserDefinedFunctionId(resourceId);
} else if (resourceType == ResourceType.User) {
return PathsHelper.validateUserId(resourceId);
} else if (resourceType == ResourceType.Attachment) {
return PathsHelper.validateAttachmentId(resourceId);
} else if (resourceType == ResourceType.ClientEncryptionKey) {
return PathsHelper.validateClientEncryptionKeyId(resourceId);
}else {
logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
return false;
}
}
public static boolean validateDatabaseId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getDatabase() != 0;
}
public static boolean validateDocumentCollectionId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getDocumentCollection() != 0;
}
public static boolean validateDocumentId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getDocument() != 0;
}
public static boolean validateConflictId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getConflict() != 0;
}
public static boolean validateAttachmentId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getAttachment() != 0;
}
public static boolean validatePermissionId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getPermission() != 0;
}
public static boolean validateStoredProcedureId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getStoredProcedure() != 0;
}
public static boolean validateTriggerId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getTrigger() != 0;
}
public static boolean validateUserDefinedFunctionId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0;
}
public static boolean validateUserId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getUser() != 0;
}
public static boolean validateClientEncryptionKeyId(String resourceIdString) {
Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0;
}
public static boolean isPublicResource(Resource resourceType) {
if (resourceType instanceof Database ||
resourceType instanceof DocumentCollection ||
resourceType instanceof StoredProcedure ||
resourceType instanceof UserDefinedFunction ||
resourceType instanceof Trigger ||
resourceType instanceof Conflict ||
resourceType instanceof User ||
resourceType instanceof Permission ||
resourceType instanceof Document ||
resourceType instanceof Offer
) {
return true;
} else {
return false;
}
}
} | class PathsHelper {
private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);
public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
if (request.getIsNameBased()) {
return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType());
} else {
return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
}
}
    /**
     * Builds the name-based path for a resource instance, given its owner's full
     * name and its own name.
     *
     * @param resourceType the resource instance whose concrete class selects the
     *        path segment (parameter name kept for compatibility; it is an
     *        instance, not a {@code ResourceType})
     * @param resourceOwnerFullName the owner's full name-based address; required
     *        for every kind except {@code Database} and {@code Offer}
     * @param resourceName the resource's own name
     * @return the name-based path, or {@code null} when a required input is
     *         missing or the instance is a plain {@code Resource}
     */
    public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
        if (resourceName == null)
            return null;
        if (resourceType instanceof Database) {
            // Databases are top-level: no owner prefix.
            return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceOwnerFullName == null) {
            return null;
        } else if (resourceType instanceof DocumentCollection) {
            return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof StoredProcedure) {
            return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof UserDefinedFunction) {
            return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Trigger) {
            return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Conflict) {
            return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof User) {
            return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Permission) {
            return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Document) {
            return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Offer) {
            // Offers ignore the owner; they are addressed at top level.
            return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Resource) {
            // Catch-all: any other Resource subclass has no name-based path.
            return null;
        }
        // NOTE(review): since the parameter is typed Resource, every non-null value
        // matches the instanceof Resource branch above; this tail appears reachable
        // only when resourceType is null (and would then NPE on toString) — confirm
        // before relying on this error path.
        String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
        assert false : errorMessage;
        throw new IllegalArgumentException(errorMessage);
    }
public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) {
if (resourceType == ResourceType.PartitionKey) {
return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete);
} else {
return generatePath(resourceType, ownerOrResourceId, isFeed, null);
}
}
    /**
     * Core rid-based path builder. For each resource type there is a feed form
     * (collection of resources) and a single-resource form; most branches parse
     * the rid into its hierarchical components and rebuild the path from them.
     *
     * @param resourceType the resource type being addressed
     * @param ownerOrResourceId the rid of the resource, or of its owner for feeds;
     *        {@code null} is normalized to the empty string
     * @param isFeed whether the path addresses a feed
     * @param operationType only consulted for {@code PartitionKey} (must be
     *        {@code Delete} to produce a path)
     * @return the generated path
     * @throws IllegalStateException when the type cannot be addressed this way, or
     *         when a feed path requires an owner rid that was not supplied
     */
    private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
        // Feeds of non-top-level types need an owner rid to scope the collection.
        if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) &&
            resourceType != ResourceType.Database &&
            resourceType != ResourceType.Offer &&
            resourceType != ResourceType.MasterPartition &&
            resourceType != ResourceType.ServerPartition &&
            resourceType != ResourceType.DatabaseAccount &&
            resourceType != ResourceType.Topology) {
            throw new IllegalStateException("INVALID resource type");
        }
        if(ownerOrResourceId == null) {
            ownerOrResourceId = StringUtils.EMPTY;
        }
        // Databases and offers are top level: the rid is used verbatim.
        if (isFeed && resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
        } else if (isFeed && resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
        // Collection-scoped children: sprocs, UDFs, triggers, conflicts, pk ranges.
        } else if (isFeed && resourceType == ResourceType.StoredProcedure) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" +
                Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
        } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            ResourceId functionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" +
                Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
        } else if (isFeed && resourceType == ResourceType.Trigger) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" +
                Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
        } else if (isFeed && resourceType == ResourceType.Conflict) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" +
                Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
        } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" +
                documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" +
                Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
        // Document-scoped children: attachments.
        } else if (isFeed && resourceType == ResourceType.Attachment) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" +
                Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" +
                Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" +
                Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
        // Users and permissions (database-scoped).
        } else if (isFeed && resourceType == ResourceType.User) {
            // The owner rid is the database rid and is used verbatim here.
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" +
                Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
                Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
        } else if (isFeed && resourceType == ResourceType.Permission) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
                Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" +
                Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" +
                Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" +
                Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
        // Documents (collection-scoped).
        } else if (isFeed && resourceType == ResourceType.Document) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return
                Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            ResourceId documentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" +
                Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
        // Account-level/topology resources use the rid verbatim.
        } else if (isFeed && resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" +
                Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
        } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            // Partition-key delete is modeled as an operation under the collection.
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
                Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
                Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        }
        String errorMessage = "invalid resource type";
        throw new IllegalStateException(errorMessage);
    }
public static PathInfo parsePathSegments(String resourceUrl) {
String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
if (segments == null || segments.length < 1) {
return null;
}
int uriSegmentsCount = segments.length;
String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/");
String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/")
: StringUtils.EMPTY;
if (uriSegmentsCount >= 2) {
if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) {
Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
if (!result.getLeft() || !result.getRight().isDatabaseId()) {
return parseNameSegments(resourceUrl, segments);
}
}
}
if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) {
return new PathInfo(true, segmentOne,
segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY,
false);
} else if (isResourceType(segmentTwo)) {
return new PathInfo(false, segmentTwo, segmentOne, false);
}
return null;
}
/**
* Method which will return boolean based on whether it is able to parse the
* path and name segment from resource url , and fill info in PathInfo object
* @param resourceUrl Complete ResourceLink
* @param pathInfo Path info object which will hold information
* @param clientVersion The Client version
* @return
*/
public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
pathInfo.resourcePath = StringUtils.EMPTY;
pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
pathInfo.isFeed = false;
pathInfo.isNameBased = false;
if (StringUtils.isEmpty(resourceUrl)) {
return false;
}
String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
if (segments == null || segments.length < 1) {
return false;
}
int uriSegmentsCount = segments.length;
String segmentOne = segments[uriSegmentsCount - 1];
String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
if (uriSegmentsCount >= 2) {
if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
&& Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
if (!result.getLeft() || !result.getRight().isDatabaseId()) {
pathInfo.isNameBased = true;
return tryParseNameSegments(resourceUrl, segments, pathInfo);
}
}
}
if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
pathInfo.isFeed = true;
pathInfo.resourcePath = segmentOne;
if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
pathInfo.resourceIdOrFullName = segmentTwo;
}
} else if (PathsHelper.isResourceType(segmentTwo)) {
pathInfo.isFeed = false;
pathInfo.resourcePath = segmentTwo;
pathInfo.resourceIdOrFullName = segmentOne;
if (!StringUtils.isEmpty(clientVersion)
&& pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) {
String attachmentId = null;
byte storeIndex = 0;
}
} else {
return false;
}
return true;
}
/**
* Method which will return boolean based on whether it is able to parse the
* name segment from resource url , and fill info in PathInfo object
* @param resourceUrl Complete ResourceLink
* @param segments
* @param pathInfo Path info object which will hold information
* @return
*/
private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
pathInfo.isFeed = false;
pathInfo.resourceIdOrFullName = "";
pathInfo.resourcePath = "";
if (segments == null || segments.length < 1) {
return false;
}
if (segments.length % 2 == 0) {
if (isResourceType(segments[segments.length - 2])) {
pathInfo.resourcePath = segments[segments.length - 2];
pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
return true;
}
} else {
if (isResourceType(segments[segments.length - 1])) {
pathInfo.isFeed = true;
pathInfo.resourcePath = segments[segments.length - 1];
String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT));
pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName);
return true;
}
}
return false;
}
public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
if (segments == null || segments.length < 1) {
return null;
}
if (segments.length % 2 == 0) {
if (isResourceType(segments[segments.length - 2])) {
return new PathInfo(false,
segments[segments.length - 2],
unescapeJavaAndTrim(resourceUrl),
true);
}
} else {
if (isResourceType(segments[segments.length - 1])) {
return new PathInfo(true,
segments[segments.length - 1],
unescapeJavaAndTrim(
resourceUrl.substring(0,
StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))),
true);
}
}
return null;
}
    /**
     * Strips leading and trailing separator characters from {@code resourceUrl}
     * and, when the remaining text contains an escape character, applies Java
     * string unescaping to the stripped value.
     *
     * @param resourceUrl the raw URL text; may be {@code null}
     * @return {@code null} for {@code null} input, {@code ""} when the input is
     *         all separators, otherwise the trimmed (and possibly unescaped) text
     */
    public static String unescapeJavaAndTrim(String resourceUrl) {
        if (resourceUrl == null) {
            return null;
        }
        // Advance past any leading separator characters.
        int startInclusiveIndex = 0;
        while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
            startInclusiveIndex++;
        }
        if (startInclusiveIndex == resourceUrl.length()) {
            return "";
        }
        // Retreat before any trailing separator characters.
        int endExclusiveIndex = resourceUrl.length();
        while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
            endExclusiveIndex--;
        }
        // If the trimmed region contains an escape character, unescape the whole
        // stripped string in one pass instead of slicing.
        for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
            if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) {
                return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
            }
        }
        // Fast path: nothing was trimmed, return the original instance.
        if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
            return resourceUrl;
        }
        return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
    }
    /**
     * Returns whether the given path segment names a known resource collection
     * (case-insensitively, using the root locale for the lowercase comparison).
     *
     * @param resourcePathSegment the path segment to test; empty/null yields false
     * @return {@code true} when the segment is a recognized resource-type segment
     */
    private static boolean isResourceType(String resourcePathSegment) {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            return false;
        }
        // Locale.ROOT keeps the comparison locale-independent (e.g. Turkish 'I').
        switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
            case Paths.COLLECTIONS_PATH_SEGMENT:
            case Paths.DATABASES_PATH_SEGMENT:
            case Paths.PERMISSIONS_PATH_SEGMENT:
            case Paths.USERS_PATH_SEGMENT:
            case Paths.DOCUMENTS_PATH_SEGMENT:
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
            case Paths.TRIGGERS_PATH_SEGMENT:
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
            case Paths.CONFLICTS_PATH_SEGMENT:
            case Paths.MEDIA_PATH_SEGMENT:
            case Paths.OFFERS_PATH_SEGMENT:
            case Paths.PARTITIONS_PATH_SEGMENT:
            case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
            case Paths.TOPOLOGY_PATH_SEGMENT:
            case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
            case Paths.SCHEMAS_PATH_SEGMENT:
            case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
                return true;
            default:
                return false;
        }
    }
public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
switch (resourceType) {
case Database:
return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
case DocumentCollection:
return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
case StoredProcedure:
return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
case UserDefinedFunction:
return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
case Trigger:
return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
case Attachment:
return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName;
case Conflict:
return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
case Document:
return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
case Offer:
return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
case Permission:
return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
case User:
return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
case PartitionKeyRange:
return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName;
default:
return null;
}
}
public static String getCollectionPath(String resourceFullName) {
if (resourceFullName != null) {
String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int index = indexOfNth(trimmedResourceFullName, '/', 4);
if (index > 0)
return trimmedResourceFullName.substring(0, index);
}
return resourceFullName;
}
public static String getDatabasePath(String resourceFullName) {
if (resourceFullName != null) {
int index = indexOfNth(resourceFullName, '/', 2);
if (index > 0)
return resourceFullName.substring(0, index);
}
return resourceFullName;
}
public static String getParentByIndex(String resourceFullName, int segmentIndex) {
int index = indexOfNth(resourceFullName, '/', segmentIndex);
if (index > 0)
return resourceFullName.substring(0, index);
else {
index = indexOfNth(resourceFullName, '/', segmentIndex - 1);
if (index > 0)
return resourceFullName;
else
return null;
}
}
public static boolean isNameBased(String resourceIdOrFullName) {
if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty()
&& resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') {
return true;
}
return false;
}
private static int indexOfNth(String str, char value, int nthOccurance) {
int remaining = nthOccurance;
char[] characters = str.toCharArray();
for (int i = 0; i < characters.length; i++) {
if (characters[i] == value) {
remaining--;
if (remaining == 0) {
return i;
}
}
}
return -1;
}
    /**
     * Maps a REST path segment (e.g. "colls") to its {@link ResourceType}.
     *
     * @param resourcePathSegment the path segment to map; must be non-empty
     * @return the resource type for the segment
     * @throws BadRequestException when the segment is empty or unknown
     */
    public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
            throw new BadRequestException(message);
        }
        switch (resourcePathSegment) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
                return ResourceType.Attachment;
            case Paths.COLLECTIONS_PATH_SEGMENT:
                return ResourceType.DocumentCollection;
            case Paths.DATABASES_PATH_SEGMENT:
                return ResourceType.Database;
            case Paths.PERMISSIONS_PATH_SEGMENT:
                return ResourceType.Permission;
            case Paths.USERS_PATH_SEGMENT:
                return ResourceType.User;
            case Paths.DOCUMENTS_PATH_SEGMENT:
                return ResourceType.Document;
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
                return ResourceType.StoredProcedure;
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
                return ResourceType.UserDefinedFunction;
            case Paths.TRIGGERS_PATH_SEGMENT:
                return ResourceType.Trigger;
            case Paths.CONFLICTS_PATH_SEGMENT:
                return ResourceType.Conflict;
            case Paths.OFFERS_PATH_SEGMENT:
                return ResourceType.Offer;
            case Paths.SCHEMAS_PATH_SEGMENT:
                return ResourceType.Schema;
        }
        // Unrecognized segment: surface as a bad request.
        String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
        throw new BadRequestException(errorMessage);
    }
    /**
     * Returns the REST path segment used to address resources of the given type.
     * Inverse of {@link #getResourcePathSegment(String)} for the shared types.
     *
     * @param resourceType the resource type to map
     * @return the path segment (e.g. "dbs", "colls"), or {@link Paths#ROOT} for
     *         account-level types
     * @throws BadRequestException when the type has no known path segment
     */
    public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT;
            case DocumentCollection:
                return Paths.COLLECTIONS_PATH_SEGMENT;
            case Document:
                return Paths.DOCUMENTS_PATH_SEGMENT;
            case StoredProcedure:
                return Paths.STORED_PROCEDURES_PATH_SEGMENT;
            case UserDefinedFunction:
                return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            case Trigger:
                return Paths.TRIGGERS_PATH_SEGMENT;
            case Conflict:
                return Paths.CONFLICTS_PATH_SEGMENT;
            case Attachment:
                return Paths.ATTACHMENTS_PATH_SEGMENT;
            case User:
                return Paths.USERS_PATH_SEGMENT;
            case Permission:
                return Paths.PERMISSIONS_PATH_SEGMENT;
            case Offer:
                return Paths.OFFERS_PATH_SEGMENT;
            // Master and server partitions share the partitions segment.
            case MasterPartition:
            case ServerPartition:
                return Paths.PARTITIONS_PATH_SEGMENT;
            case PartitionKeyRange:
                return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            case Media:
                return Paths.MEDIA_ROOT;
            case Schema:
                return Paths.SCHEMAS_PATH_SEGMENT;
            // Account-level types are addressed at the service root.
            case DatabaseAccount:
            case Topology:
                return Paths.ROOT;
            default:
                String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
                throw new BadRequestException(errorMessage);
        }
    }
/**
 * Checks whether a name-based resource path matches the canonical segment
 * layout for the given resource type (alternating fixed segment / caller id,
 * e.g. "dbs/db1/colls/coll1" for a collection).
 *
 * @param resourceType the type whose canonical layout is expected.
 * @param resourceFullName the full name-based path to validate.
 * @return true when the path has exactly the expected shape, false otherwise.
 */
public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
    String[] expectedSegments = getResourcePathArray(resourceType);
    if (expectedSegments == null) {
        // No known layout for this type.
        return false;
    }
    String[] actualSegments = StringUtils.split(resourceFullName, '/');
    // Each fixed segment must be followed by exactly one id segment.
    if (actualSegments.length != 2 * expectedSegments.length) {
        return false;
    }
    for (int index = 0; index < expectedSegments.length; index++) {
        if (!expectedSegments[index].equals(actualSegments[2 * index])) {
            return false;
        }
    }
    return true;
}
/**
 * Builds the ordered list of fixed path segments that a name-based path for
 * {@code resourceType} must contain (e.g. Document -> [dbs, colls, docs]).
 *
 * @param resourceType the resource type whose path layout is requested.
 * @return the fixed segments in order, or {@code null} when the type has no
 *         name-based path layout known to this helper.
 */
private static String[] getResourcePathArray(ResourceType resourceType) {
    List<String> segments = new ArrayList<String>();
    // Every name-based resource path is rooted under a database.
    segments.add(Paths.DATABASES_PATH_SEGMENT);
    if (resourceType == ResourceType.Permission ||
        resourceType == ResourceType.User) {
        segments.add(Paths.USERS_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission) {
            segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
        }
    } else if (resourceType == ResourceType.ClientEncryptionKey) {
        segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    } else if (resourceType == ResourceType.DocumentCollection ||
        resourceType == ResourceType.StoredProcedure ||
        resourceType == ResourceType.UserDefinedFunction ||
        resourceType == ResourceType.Trigger ||
        resourceType == ResourceType.Conflict ||
        resourceType == ResourceType.Attachment ||
        resourceType == ResourceType.Document ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.Schema) {
        segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
        if (resourceType == ResourceType.StoredProcedure) {
            segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Trigger) {
            segments.add(Paths.TRIGGERS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Conflict) {
            segments.add(Paths.CONFLICTS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Schema) {
            segments.add(Paths.SCHEMAS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Document ||
            resourceType == ResourceType.Attachment) {
            segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
            if (resourceType == ResourceType.Attachment) {
                segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
            }
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
        }
        // NOTE(review): an 'else if (resourceType == ResourceType.PartitionKey)' branch
        // previously lived here, but it was unreachable — PartitionKey is not part of the
        // enclosing condition above, so control could never enter it. The dead branch has
        // been removed (behavior unchanged: PartitionKey still falls through to 'return
        // null' below). If PartitionKey paths must be supported, handle the type in the
        // outer if/else chain instead.
    } else if (resourceType != ResourceType.Database) {
        // Unknown/unsupported type: signal "no layout" to validateResourceFullName.
        return null;
    }
    return segments.stream().toArray(String[]::new);
}
/**
 * Validates that a rid string parses to a {@link ResourceId} carrying the
 * sub-identifier expected for the given resource type.
 *
 * @param resourceType the type the id is supposed to identify.
 * @param resourceId the rid string to validate.
 * @return true when the id is valid for the type; unsupported types log an
 *         error and return false.
 */
public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
    switch (resourceType) {
        case Conflict:
            return PathsHelper.validateConflictId(resourceId);
        case Database:
            return PathsHelper.validateDatabaseId(resourceId);
        case DocumentCollection:
            return PathsHelper.validateDocumentCollectionId(resourceId);
        case Document:
            return PathsHelper.validateDocumentId(resourceId);
        case Permission:
            return PathsHelper.validatePermissionId(resourceId);
        case StoredProcedure:
            return PathsHelper.validateStoredProcedureId(resourceId);
        case Trigger:
            return PathsHelper.validateTriggerId(resourceId);
        case UserDefinedFunction:
            return PathsHelper.validateUserDefinedFunctionId(resourceId);
        case User:
            return PathsHelper.validateUserId(resourceId);
        case Attachment:
            return PathsHelper.validateAttachmentId(resourceId);
        case ClientEncryptionKey:
            return PathsHelper.validateClientEncryptionKeyId(resourceId);
        default:
            logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
            return false;
    }
}
/** Returns true when the string parses to a rid with a non-zero database id. */
public static boolean validateDatabaseId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDatabase() != 0;
}
/** Returns true when the string parses to a rid with a non-zero collection id. */
public static boolean validateDocumentCollectionId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDocumentCollection() != 0;
}
/** Returns true when the string parses to a rid with a non-zero document id. */
public static boolean validateDocumentId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDocument() != 0;
}
/** Returns true when the string parses to a rid with a non-zero conflict id. */
public static boolean validateConflictId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getConflict() != 0;
}
/** Returns true when the string parses to a rid with a non-zero attachment id. */
public static boolean validateAttachmentId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getAttachment() != 0;
}
/** Returns true when the string parses to a rid with a non-zero permission id. */
public static boolean validatePermissionId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getPermission() != 0;
}
/** Returns true when the string parses to a rid with a non-zero stored-procedure id. */
public static boolean validateStoredProcedureId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getStoredProcedure() != 0;
}
/** Returns true when the string parses to a rid with a non-zero trigger id. */
public static boolean validateTriggerId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getTrigger() != 0;
}
/** Returns true when the string parses to a rid with a non-zero UDF id. */
public static boolean validateUserDefinedFunctionId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getUserDefinedFunction() != 0;
}
/** Returns true when the string parses to a rid with a non-zero user id. */
public static boolean validateUserId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getUser() != 0;
}
/** Returns true when the string parses to a rid with a non-zero client-encryption-key id. */
public static boolean validateClientEncryptionKeyId(String resourceIdString) {
    Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getClientEncryptionKey() != 0;
}
/**
 * Determines whether the given resource instance belongs to one of the
 * externally exposed (public) resource types.
 *
 * @param resource the resource instance to classify; {@code instanceof} makes
 *                 null safe (returns false).
 * @return true when the instance is a public resource type.
 */
public static boolean isPublicResource(Resource resource) {
    // Idiom fix: return the predicate directly instead of the redundant
    // 'if (cond) return true; else return false;' form.
    return resource instanceof Database
        || resource instanceof DocumentCollection
        || resource instanceof StoredProcedure
        || resource instanceof UserDefinedFunction
        || resource instanceof Trigger
        || resource instanceof Conflict
        || resource instanceof User
        || resource instanceof Permission
        || resource instanceof Document
        || resource instanceof Offer;
}
} |
Does this need to be a POST? | private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
} | return this.performRequest(request, HttpMethod.POST); | private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
} | class RxGatewayStoreModel implements RxStoreModel {
// Sentinel body used when the HTTP response carries no content.
private final static byte[] EMPTY_BYTE_ARRAY = {};
// Factory for per-request CosmosDiagnostics instances.
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
// Headers attached to every outgoing request unless overridden per request.
private final Map<String, String> defaultHeaders;
// Transport used to reach the gateway endpoint.
private final HttpClient httpClient;
// Controls the content-type used for query requests.
private final QueryCompatibilityMode queryCompatibilityMode;
// Resolves the regional service endpoint for each request.
private final GlobalEndpointManager globalEndpointManager;
// Client-level default consistency; drives session-token handling.
private ConsistencyLevel defaultConsistencyLevel;
// Tracks session tokens for session consistency bookkeeping.
private ISessionContainer sessionContainer;
// Optional client-side throughput limiter; null while throughput control is disabled.
private ThroughputControlStore throughputControlStore;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
// Per-verb delegators: each maps a document-service operation onto the HTTP
// method the gateway expects and defers to performRequest.

// Create (also used for transactional batch) is sent as POST.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
// Partial-document patch uses the PATCH verb.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.PATCH);
}
// Upsert is sent as POST (same verb as create).
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
// Point read.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.GET);
}
// Full replace uses PUT.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.DELETE);
}
// Stored-procedure execution is sent as POST.
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
// Feed reads are GETs.
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.GET);
}
/**
 * Issues a query (or query-plan) request as a POST. Marks the request as a
 * query and selects the content-type dictated by the configured
 * {@link QueryCompatibilityMode}.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    // Query-plan requests are not flagged with the IS_QUERY header.
    if (request.getOperationType() != OperationType.QueryPlan) {
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
    }
    // SqlQuery mode posts raw SQL text; Default/Query (and anything else)
    // post the JSON query envelope.
    String contentType = this.queryCompatibilityMode == QueryCompatibilityMode.SqlQuery
        ? RuntimeConstants.MediaTypes.SQL
        : RuntimeConstants.MediaTypes.QUERY_JSON;
    request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, contentType);
    return this.performRequest(request, HttpMethod.POST);
}
/**
 * Resolves the target URI for the request, records it in the request context,
 * and hands off to performRequestInternal — routing through the throughput
 * control store when one has been configured.
 */
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
    try {
        if (request.requestContext.cosmosDiagnostics == null) {
            request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
        }
        URI uri = getUri(request);
        request.requestContext.resourcePhysicalAddress = uri.toString();
        Mono<RxDocumentServiceResponse> responseMono = performRequestInternal(request, method, uri);
        if (this.throughputControlStore == null) {
            return responseMono;
        }
        return this.throughputControlStore.processRequest(request, responseMono);
    } catch (Exception e) {
        // Surface synchronous failures (e.g. URI construction) through the pipeline.
        return Mono.error(e);
    }
}
/**
 * Builds the HTTP request for {@code request} and returns a cold {@link Mono}
 * that, on subscription, sends it and emits a single
 * {@link RxDocumentServiceResponse}.
 *
 * @param request the document service request to send.
 * @param method the HTTP verb to use.
 * @param requestUri the fully resolved target URI.
 * @return a Mono emitting the translated service response.
 */
public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
    try {
        HttpRequest httpRequest = new HttpRequest(
            method,
            requestUri,
            requestUri.getPort(),
            this.getHttpRequestHeaders(request.getHeaders()),
            request.getContentAsByteArrayFlux());
        // Start from the general response timeout, then tighten/override for
        // query-plan and address-refresh requests.
        Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
        if (OperationType.QueryPlan.equals(request.getOperationType())) {
            responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
        } else if (request.isAddressRefresh()) {
            responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
        }
        return toDocumentServiceResponse(this.httpClient.send(httpRequest, responseTimeout), request, httpRequest);
    } catch (Exception e) {
        return Mono.error(e);
    }
}
/**
 * Merges the caller-supplied headers with the model's default headers.
 * Request headers win over defaults; null header values are normalized to
 * empty strings.
 *
 * @param headers per-request headers; may be null, in which case only the
 *                defaults are applied.
 * @return the combined HTTP headers.
 */
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
    HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
    // BUGFIX: 'headers' was previously null-checked only AFTER
    // headers.containsKey(...) had already been invoked in the first loop, so a
    // null map would throw NullPointerException before reaching the guard.
    if (headers == null) {
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
        return httpHeaders;
    }
    // Defaults apply only when not overridden by the request.
    for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
        if (!headers.containsKey(entry.getKey())) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    for (Entry<String, String> entry : headers.entrySet()) {
        // Normalize null values to empty strings before setting.
        if (entry.getValue() == null) {
            httpHeaders.set(entry.getKey(), "");
        } else {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    return httpHeaders;
}
/**
 * Computes the absolute HTTPS URI for the request: the endpoint override when
 * present, otherwise the endpoint resolved by the global endpoint manager
 * (first write endpoint for media requests), joined with the resource path.
 *
 * @throws URISyntaxException when the composed URI is malformed.
 */
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
    URI rootUri = request.getEndpointOverride();
    if (rootUri == null) {
        rootUri = request.getIsMedia()
            ? this.globalEndpointManager.getWriteEndpoints().get(0)
            : this.globalEndpointManager.resolveServiceEndpoint(request);
    }
    String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
    // Database-account requests target the service root instead of a resource path.
    if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
        path = StringUtils.EMPTY;
    }
    return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null);
}
/**
 * Returns the given path guaranteed to start with '/', leaving null and
 * already-prefixed paths untouched.
 */
private String ensureSlashPrefixed(String path) {
    if (path == null || path.startsWith("/")) {
        return path;
    }
    return "/" + path;
}
/**
 * Translates the reactor-netty HTTP response into an
 * {@link RxDocumentServiceResponse}.
 *
 * The returned Mono is cold: the HTTP invocation only happens once the
 * customer code subscribes. On success the body is aggregated, error status
 * codes are converted to {@link CosmosException}s, and diagnostics/timeline
 * data are recorded. On failure the error is normalized to a CosmosException
 * (wrapping transport failures) and enriched with diagnostics.
 *
 * @param httpResponseMono the pending HTTP exchange.
 * @param request the originating document service request.
 * @param httpRequest the outgoing HTTP request (source of the timeline snapshot).
 * @return a Mono emitting the translated response or a CosmosException error.
 */
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                  RxDocumentServiceRequest request,
                                                                  HttpRequest httpRequest) {
    return httpResponseMono.flatMap(httpResponse -> {
        HttpHeaders httpResponseHeaders = httpResponse.headers();
        int httpResponseStatus = httpResponse.statusCode();
        // An empty body is represented by a shared empty byte array.
        Mono<byte[]> contentObservable = httpResponse
            .bodyAsByteArray()
            .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
        return contentObservable
            .map(content -> {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                if (reactorNettyRequestRecord != null) {
                    reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                    BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                        reactorNettyRequestRecord.takeTimelineSnapshot());
                }
                // Throws CosmosException for gateway error status codes.
                validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                StoreResponse rsp = new StoreResponse(httpResponseStatus,
                    HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                    content);
                // BUGFIX: reactorNettyRequestRecord was previously dereferenced here
                // unconditionally even though the null check above shows it can be
                // null, which caused a NullPointerException for a null record.
                if (reactorNettyRequestRecord != null) {
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                    DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                }
                return rsp;
            })
            .single();
    }).map(rsp -> {
        // Attach the request timeline to the final response when available.
        if (httpRequest.reactorNettyRequestRecord() != null) {
            return new RxDocumentServiceResponse(this.clientContext, rsp,
                httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
        } else {
            return new RxDocumentServiceResponse(this.clientContext, rsp);
        }
    }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (!(unwrappedException instanceof Exception)) {
            // Non-Exception throwables (Errors) are rethrown as-is.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap transport-level failures into a CosmosException (status code 0).
            logger.error("Network failure", exception);
            dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        // Classify network failures so retry policies can distinguish read
        // timeouts from endpoint unavailability.
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            // Backfill the timeline if the success path never recorded one.
            if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) {
                BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                    httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
            }
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
            BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
        }
        return Mono.error(dce);
    });
}
/**
 * Throws a {@link CosmosException} when the gateway reported an error status
 * (at or above the minimum gateway error status code); returns normally for
 * successful responses.
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode < HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        return;
    }
    String statusCodeString = status.reasonPhrase() != null
        ? status.reasonPhrase().replace(" ", "")
        : "";
    String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
    // Parse the error payload when present, then rebuild it with the status
    // appended to the message.
    CosmosError parsedError = StringUtils.isNotEmpty(body) ? new CosmosError(body) : new CosmosError();
    CosmosError cosmosError = new CosmosError(statusCodeString,
        String.format("%s, StatusCode: %s", parsedError.getMessage(), statusCodeString),
        parsedError.getPartitionedQueryExecutionInfo());
    CosmosException dce = BridgeInternal.createCosmosException(
        request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
    throw dce;
}
/**
 * Routes the request to the matching per-verb handler based on its operation type.
 *
 * @throws IllegalStateException for operation types this store model does not handle.
 */
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
    OperationType operationType = request.getOperationType();
    if (operationType == OperationType.Create || operationType == OperationType.Batch) {
        return this.create(request);
    }
    if (operationType == OperationType.Patch) {
        return this.patch(request);
    }
    if (operationType == OperationType.Upsert) {
        return this.upsert(request);
    }
    if (operationType == OperationType.Delete) {
        // Partition-key deletes take a dedicated path.
        return request.getResourceType() == ResourceType.PartitionKey
            ? this.deleteByPartitionKey(request)
            : this.delete(request);
    }
    if (operationType == OperationType.ExecuteJavaScript) {
        return this.execute(request);
    }
    if (operationType == OperationType.Read) {
        return this.read(request);
    }
    if (operationType == OperationType.ReadFeed) {
        return this.readFeed(request);
    }
    if (operationType == OperationType.Replace) {
        return this.replace(request);
    }
    if (operationType == OperationType.SqlQuery
        || operationType == OperationType.Query
        || operationType == OperationType.QueryPlan) {
        return this.query(request);
    }
    throw new IllegalStateException("Unknown operation setType " + operationType);
}
/**
 * Wraps the actual invocation in the web-exception retry policy so transient
 * gateway failures are retried with backoff.
 */
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
    WebExceptionRetryPolicy retryPolicy =
        new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics));
    return BackoffRetryUtility.executeRetry(() -> invokeAsyncInternal(request).single(), retryPolicy);
}
/**
 * Pipeline entry point: applies the session token to the outgoing request,
 * invokes it with retries, and keeps the session container up to date from
 * either the response headers or (for selected failures) the error headers.
 */
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
    this.applySessionToken(request);
    Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
    return responseObs.onErrorResume(
        e -> {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null) {
                // Non-Cosmos failures are unexpected here; log and propagate as-is.
                logger.error("unexpected failure {}", e.getMessage(), e);
                return Mono.error(e);
            }
            // For non-master resources, capture the session token even on failure
            // when the error still carries useful session state: precondition
            // failures, conflicts, and 404s other than "read session not available".
            if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                    dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                    (
                        dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                this.captureSessionToken(request, dce.getResponseHeaders());
            }
            // Throughput-control throttles are raised client-side and never reach the
            // HTTP translation layer, so record them into diagnostics here.
            if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
                BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
            }
            return Mono.error(dce);
        }
    ).map(response ->
        {
            // Successful responses always refresh the session container.
            this.captureSessionToken(request, response.getResponseHeaders());
            return response;
        }
    );
}
/**
 * Enables client-side throughput control for requests flowing through this
 * store model.
 *
 * BUGFIX: this override was previously an empty no-op, so the private
 * {@code throughputControlStore} field consulted by {@code performRequest}
 * was never assigned anywhere and throughput control could never engage.
 *
 * @param throughputControlStore the store that throttles requests against the
 *                               configured throughput budget.
 */
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
    this.throughputControlStore = throughputControlStore;
}
/**
 * Updates the session container from response headers. Deleting a collection
 * clears every token tracked for that collection instead of recording a new one.
 */
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
    boolean isCollectionDelete = request.getResourceType() == ResourceType.DocumentCollection
        && request.getOperationType() == OperationType.Delete;
    if (!isCollectionDelete) {
        this.sessionContainer.setSessionToken(request, responseHeaders);
        return;
    }
    // Name-based requests learn the collection rid from the OWNER_ID response header;
    // rid-based requests already carry it.
    String resourceId = request.getIsNameBased()
        ? responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID)
        : request.getResourceId();
    this.sessionContainer.clearTokenByResourceId(resourceId);
}
/**
 * Adds, keeps, or strips the session-token header on the outgoing request
 * according to the effective consistency level and the resource/operation kind.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // A session token applies when the request explicitly asks for SESSION, or the
    // client default is SESSION and the request has not effectively opted down
    // (the inner '||' keeps the token unless it is a Document read at EVENTUAL).
    // NOTE(review): the '||' combination of the opt-out clauses looks intentional
    // but is subtle — confirm against the service consistency contract before changing.
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller supplied a token: strip it when tokens do not apply or this is a
        // master operation; otherwise leave it untouched.
        if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
        return;
    }
    // No caller-supplied token: resolve the client-tracked global session token.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * True when the operation targets a master (metadata) resource, is a
 * stored-procedure master operation, or is a query-plan request.
 */
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (ReplicatedResourceClientUtils.isMasterResource(resourceType)) {
        return true;
    }
    if (isStoredProcedureMasterOperation(resourceType, operationType)) {
        return true;
    }
    return operationType == OperationType.QueryPlan;
}
/**
 * Stored-procedure operations other than execution (i.e. CRUD on the sproc
 * definition itself) count as master operations.
 */
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (resourceType != ResourceType.StoredProcedure) {
        return false;
    }
    return operationType != OperationType.ExecuteJavaScript;
}
} | class RxGatewayStoreModel implements RxStoreModel {
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
private ThroughputControlStore throughputControlStore;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
if(request.getOperationType() != OperationType.QueryPlan) {
request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
}
switch (this.queryCompatibilityMode) {
case SqlQuery:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.SQL);
break;
case Default:
case Query:
default:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.QUERY_JSON);
break;
}
return this.performRequest(request, HttpMethod.POST);
}
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
* Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
*
* @param request
* @param method
* @param requestUri
* @return Flux<RxDocumentServiceResponse>
*/
public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
if (OperationType.QueryPlan.equals(request.getOperationType())) {
responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
} else if (request.isAddressRefresh()) {
responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
return Mono.error(e);
}
}
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
if (!headers.containsKey(entry.getKey())) {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
if (headers != null) {
for (Entry<String, String> entry : headers.entrySet()) {
if (entry.getValue() == null) {
httpHeaders.set(entry.getKey(), "");
} else {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
}
return httpHeaders;
}
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
private String ensureSlashPrefixed(String path) {
if (path == null) {
return null;
}
if (path.startsWith("/")) {
return path;
}
return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
RxDocumentServiceRequest request,
HttpRequest httpRequest) {
return httpResponseMono.flatMap(httpResponse -> {
HttpHeaders httpResponseHeaders = httpResponse.headers();
int httpResponseStatus = httpResponse.statusCode();
Mono<byte[]> contentObservable = httpResponse
.bodyAsByteArray()
.switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
return contentObservable
.map(content -> {
ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
if (reactorNettyRequestRecord != null) {
reactorNettyRequestRecord.setTimeCompleted(Instant.now());
BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
reactorNettyRequestRecord.takeTimelineSnapshot());
}
validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
StoreResponse rsp = new StoreResponse(httpResponseStatus,
HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
content);
DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
}
return rsp;
})
.single();
}).map(rsp -> {
if (httpRequest.reactorNettyRequestRecord() != null) {
return new RxDocumentServiceResponse(this.clientContext, rsp,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
} else {
return new RxDocumentServiceResponse(this.clientContext, rsp);
}
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) {
BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
}
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
});
}
/**
 * Translates gateway error responses into a {@link CosmosException} carrying the
 * service-provided error payload.
 *
 * @param request the request whose physical address and headers enrich the exception.
 * @param status the HTTP response status.
 * @param headers the HTTP response headers, copied onto the exception.
 * @param bodyAsBytes the raw response body; may be null.
 * @throws CosmosException when the status code is at or above the gateway error threshold.
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        String statusCodeString = status.reasonPhrase() != null
            ? status.reasonPhrase().replace(" ", "")
            : "";
        // Decode with an explicit charset: the no-arg String(byte[]) constructor uses the
        // platform default charset, which is not guaranteed to be UTF-8. Assumes the error
        // payload is UTF-8 JSON (the JSON wire default) -- confirm against the service contract.
        String body = bodyAsBytes != null
            ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
            : null;
        CosmosError cosmosError;
        cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
        // Re-wrap so the message always carries the status code alongside the service message.
        cosmosError = new CosmosError(statusCodeString,
            String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
            cosmosError.getPartitionedQueryExecutionInfo());
        CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
        BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        throw dce;
    }
}
/**
 * Dispatches the request to the handler matching its operation type.
 *
 * @param request the request to route.
 * @return the response publisher produced by the matching handler.
 * @throws IllegalStateException for operation types this store model does not handle.
 */
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
    OperationType operationType = request.getOperationType();
    if (operationType == OperationType.Create || operationType == OperationType.Batch) {
        return this.create(request);
    }
    if (operationType == OperationType.Patch) {
        return this.patch(request);
    }
    if (operationType == OperationType.Upsert) {
        return this.upsert(request);
    }
    if (operationType == OperationType.Delete) {
        // Partition-key level deletes are routed to a dedicated handler.
        return request.getResourceType() == ResourceType.PartitionKey
            ? this.deleteByPartitionKey(request)
            : this.delete(request);
    }
    if (operationType == OperationType.ExecuteJavaScript) {
        return this.execute(request);
    }
    if (operationType == OperationType.Read) {
        return this.read(request);
    }
    if (operationType == OperationType.ReadFeed) {
        return this.readFeed(request);
    }
    if (operationType == OperationType.Replace) {
        return this.replace(request);
    }
    if (operationType == OperationType.SqlQuery
        || operationType == OperationType.Query
        || operationType == OperationType.QueryPlan) {
        return this.query(request);
    }
    throw new IllegalStateException("Unknown operation setType " + operationType);
}
/**
 * Wraps {@link #invokeAsyncInternal} in a web-exception retry loop.
 * {@code .single()} guarantees exactly one response element per attempt.
 */
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
    WebExceptionRetryPolicy retryPolicy =
        new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics));
    return BackoffRetryUtility.executeRetry(() -> invokeAsyncInternal(request).single(), retryPolicy);
}
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
// Attach (or strip) the session token on the outgoing request before dispatch.
this.applySessionToken(request);
Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
return responseObs.onErrorResume(
e -> {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null) {
// Non-Cosmos failures are unexpected here; log and propagate unchanged.
logger.error("unexpected failure {}", e.getMessage(), e);
return Mono.error(e);
}
// Capture the session token from error responses too, but only for non-master
// resources and only for outcomes that still carry valid session state:
// 412 (precondition failed), 409 (conflict), and 404s other than
// "read session not available".
if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
(dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
(
dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
!Exceptions.isSubStatusCode(dce,
HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
this.captureSessionToken(request, dce.getResponseHeaders());
}
// Throughput-control-induced 429s are recorded into diagnostics before propagating.
if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
}
).map(response ->
{
// Success path: remember the session token returned in the response headers.
this.captureSessionToken(request, response.getResponseHeaders());
return response;
}
);
}
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
// Intentionally a no-op: this store model ignores the throughput control store.
// NOTE(review): confirm gateway mode is meant to bypass throughput control here.
}
/**
 * Records (or clears) session-token state after a response.
 * Deleting a collection invalidates every token scoped to that collection;
 * all other responses simply update the session container.
 */
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
    boolean isCollectionDelete = request.getResourceType() == ResourceType.DocumentCollection
        && request.getOperationType() == OperationType.Delete;
    if (!isCollectionDelete) {
        this.sessionContainer.setSessionToken(request, responseHeaders);
        return;
    }
    // Name-based requests report the collection's RID in the owner-id response header.
    String collectionResourceId = request.getIsNameBased()
        ? responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID)
        : request.getResourceId();
    this.sessionContainer.clearTokenByResourceId(collectionResourceId);
}
/**
 * Attaches, keeps, or strips the session-token header on an outgoing request,
 * depending on whether session consistency applies to it.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);

    boolean explicitSessionRequested =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString());
    // With an account default of SESSION, session consistency is inherited unless this is a
    // document read explicitly downgraded to EVENTUAL.
    boolean downgradedDocumentRead =
        request.isReadOnlyRequest()
            && request.getResourceType() == ResourceType.Document
            && Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString());
    boolean sessionTokenApplicable = explicitSessionRequested
        || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && !downgradedDocumentRead);

    boolean masterOperation = isMasterOperation(request.getResourceType(), request.getOperationType());
    if (!Strings.isNullOrEmpty(headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller supplied a token already; strip it when session consistency cannot apply.
        if (!sessionTokenApplicable || masterOperation) {
            headers.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (!sessionTokenApplicable || masterOperation) {
        return;
    }
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * True when the request must be treated as a master (metadata) operation:
 * any master-resource request, any non-execute stored-procedure operation,
 * and every query-plan request.
 */
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (operationType == OperationType.QueryPlan) {
        return true;
    }
    if (ReplicatedResourceClientUtils.isMasterResource(resourceType)) {
        return true;
    }
    return isStoredProcedureMasterOperation(resourceType, operationType);
}
/**
 * Every stored-procedure operation except execution is a metadata (master) operation.
 */
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (resourceType != ResourceType.StoredProcedure) {
        return false;
    }
    return operationType != OperationType.ExecuteJavaScript;
}
} |
You might need to add support for resource token requests, since this introduces a new resource type; please check the method https://github.com/Azure/azure-sdk-for-java/blob/c28d9a2e4b40feece2a26799d0fecb31d6721b84/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/BaseAuthorizationTokenProvider.java#L177 to verify. You can add a simple test in this test class: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/ResourceTokenTestForV4.java | private static String getResourceSegment(ResourceType resourceType) {
switch (resourceType) {
    // Cases are alphabetized; the switch maps each resource type to the canonical
    // path segment used when signing the request.
    case Attachment:
        return Paths.ATTACHMENTS_PATH_SEGMENT;
    case ClientEncryptionKey:
        return Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
    case Conflict:
        return Paths.CONFLICTS_PATH_SEGMENT;
    case Database:
        return Paths.DATABASES_PATH_SEGMENT;
    case Document:
        return Paths.DOCUMENTS_PATH_SEGMENT;
    // Partition-key requests are addressed (and signed) at collection scope.
    case DocumentCollection:
    case PartitionKey:
        return Paths.COLLECTIONS_PATH_SEGMENT;
    case Media:
        return Paths.MEDIA_PATH_SEGMENT;
    case Offer:
        return Paths.OFFERS_PATH_SEGMENT;
    case PartitionKeyRange:
        return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
    case Permission:
        return Paths.PERMISSIONS_PATH_SEGMENT;
    case StoredProcedure:
        return Paths.STORED_PROCEDURES_PATH_SEGMENT;
    case Trigger:
        return Paths.TRIGGERS_PATH_SEGMENT;
    case User:
        return Paths.USERS_PATH_SEGMENT;
    case UserDefinedFunction:
        return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    // Account-level and telemetry requests carry no resource segment.
    case DatabaseAccount:
    case ClientTelemetry:
        return "";
    default:
        // Unknown resource types yield null; callers treat this as "no segment".
        return null;
}
} | case PartitionKey: | private static String getResourceSegment(ResourceType resourceType) {
switch (resourceType) {
    // Cases are alphabetized; the switch maps each resource type to the canonical
    // path segment used when signing the request.
    case Attachment:
        return Paths.ATTACHMENTS_PATH_SEGMENT;
    case ClientEncryptionKey:
        return Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
    case Conflict:
        return Paths.CONFLICTS_PATH_SEGMENT;
    case Database:
        return Paths.DATABASES_PATH_SEGMENT;
    case Document:
        return Paths.DOCUMENTS_PATH_SEGMENT;
    // Partition-key requests are addressed (and signed) at collection scope.
    case DocumentCollection:
    case PartitionKey:
        return Paths.COLLECTIONS_PATH_SEGMENT;
    case Media:
        return Paths.MEDIA_PATH_SEGMENT;
    case Offer:
        return Paths.OFFERS_PATH_SEGMENT;
    case PartitionKeyRange:
        return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
    case Permission:
        return Paths.PERMISSIONS_PATH_SEGMENT;
    case StoredProcedure:
        return Paths.STORED_PROCEDURES_PATH_SEGMENT;
    case Trigger:
        return Paths.TRIGGERS_PATH_SEGMENT;
    case User:
        return Paths.USERS_PATH_SEGMENT;
    case UserDefinedFunction:
        return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    // Account-level and telemetry requests carry no resource segment.
    case DatabaseAccount:
    case ClientTelemetry:
        return "";
    default:
        // Unknown resource types yield null; callers treat this as "no segment".
        return null;
}
} | class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider {
private static final String AUTH_PREFIX = "type=master&ver=1.0&sig=";
private final AzureKeyCredential credential;
private volatile String currentCredentialKey;
private volatile MacPool macPool;
private final Lock macInstanceLock = new ReentrantLock();
public BaseAuthorizationTokenProvider(AzureKeyCredential credential) {
this.credential = credential;
reInitializeIfPossible();
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb.
* @param resourceIdOrFullName the resource id or full name
* @param resourceType the resource type.
* @param headers the request headers.
* @return the key authorization signature.
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
ResourceType resourceType,
Map<String, String> headers) {
return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName,
BaseAuthorizationTokenProvider.getResourceSegment(resourceType), headers);
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb
* @param resourceIdOrFullName the resource id or full name
* @param resourceSegment the resource segment
* @param headers the request headers
* @return the key authorization signature
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
String resourceSegment,
Map<String, String> headers) {
if (verb == null) {
throw new IllegalArgumentException("verb");
}
if (resourceIdOrFullName == null) {
resourceIdOrFullName = "";
}
if (resourceSegment == null) {
throw new IllegalArgumentException("resourceSegment");
}
if (headers == null) {
throw new IllegalArgumentException("headers");
}
if (StringUtils.isEmpty(this.credential.getKey())) {
throw new IllegalArgumentException("key credentials cannot be empty");
}
if(!PathsHelper.isNameBased(resourceIdOrFullName)) {
resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT);
}
StringBuilder body = new StringBuilder();
body.append(ModelBridgeInternal.toLower(verb))
.append('\n')
.append(resourceSegment)
.append('\n')
.append(resourceIdOrFullName)
.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
MacPool.ReUsableMac macInstance = getReUseableMacInstance();
try {
byte[] digest = macInstance.get().doFinal(body.toString().getBytes(StandardCharsets.UTF_8));
String auth = Utils.encodeBase64String(digest);
return AUTH_PREFIX + auth;
}
finally {
macInstance.close();
}
}
/**
* This API is a helper method to create auth header based on client request using resourceTokens.
*
* @param resourceTokens the resource tokens.
* @param path the path.
* @param resourceId the resource id.
* @return the authorization token.
*/
public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens,
String path,
String resourceId) {
if (resourceTokens == null) {
throw new IllegalArgumentException("resourceTokens");
}
String resourceToken = null;
if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) {
resourceToken = resourceTokens.get(resourceId);
} else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) {
if (resourceTokens.size() > 0) {
resourceToken = resourceTokens.values().iterator().next();
}
} else {
String[] pathParts = StringUtils.split(path, "/");
String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions",
"attachments", "media", "conflicts"};
HashSet<String> resourceTypesSet = new HashSet<String>();
Collections.addAll(resourceTypesSet, resourceTypes);
for (int i = pathParts.length - 1; i >= 0; --i) {
if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) {
resourceToken = resourceTokens.get(pathParts[i]);
}
}
}
return resourceToken;
}
private MacPool.ReUsableMac getReUseableMacInstance() {
reInitializeIfPossible();
return macPool.take();
}
/*
* Ensures that this.macInstance is initialized
* In-case of credential change, optimistically will try to refresh the macInstance
*
* Implementation is non-blocking, the one which acquire the lock will try to refresh
* with new credentials
*
* NOTE: Calling it CTOR ensured that default is initialized.
*/
private void reInitializeIfPossible() {
if (this.currentCredentialKey != this.credential.getKey()) {
boolean lockAcquired = this.macInstanceLock.tryLock();
if (lockAcquired) {
try {
if (this.currentCredentialKey != this.credential.getKey()) {
byte[] masterKeyBytes = this.credential.getKey().getBytes(StandardCharsets.UTF_8);
byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes);
SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256");
try {
Mac macInstance = Mac.getInstance("HMACSHA256");
macInstance.init(signingKey);
this.currentCredentialKey = this.credential.getKey();
this.macPool = new MacPool(macInstance);
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
throw new IllegalStateException(e);
}
}
} finally {
this.macInstanceLock.unlock();
}
}
}
}
} | class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider {
private static final String AUTH_PREFIX = "type=master&ver=1.0&sig=";
private final AzureKeyCredential credential;
private volatile String currentCredentialKey;
private volatile MacPool macPool;
private final Lock macInstanceLock = new ReentrantLock();
public BaseAuthorizationTokenProvider(AzureKeyCredential credential) {
this.credential = credential;
reInitializeIfPossible();
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb.
* @param resourceIdOrFullName the resource id or full name
* @param resourceType the resource type.
* @param headers the request headers.
* @return the key authorization signature.
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
ResourceType resourceType,
Map<String, String> headers) {
return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName,
BaseAuthorizationTokenProvider.getResourceSegment(resourceType), headers);
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb
* @param resourceIdOrFullName the resource id or full name
* @param resourceSegment the resource segment
* @param headers the request headers
* @return the key authorization signature
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
String resourceSegment,
Map<String, String> headers) {
if (verb == null) {
throw new IllegalArgumentException("verb");
}
if (resourceIdOrFullName == null) {
resourceIdOrFullName = "";
}
if (resourceSegment == null) {
throw new IllegalArgumentException("resourceSegment");
}
if (headers == null) {
throw new IllegalArgumentException("headers");
}
if (StringUtils.isEmpty(this.credential.getKey())) {
throw new IllegalArgumentException("key credentials cannot be empty");
}
if(!PathsHelper.isNameBased(resourceIdOrFullName)) {
resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT);
}
StringBuilder body = new StringBuilder();
body.append(ModelBridgeInternal.toLower(verb))
.append('\n')
.append(resourceSegment)
.append('\n')
.append(resourceIdOrFullName)
.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
MacPool.ReUsableMac macInstance = getReUseableMacInstance();
try {
byte[] digest = macInstance.get().doFinal(body.toString().getBytes(StandardCharsets.UTF_8));
String auth = Utils.encodeBase64String(digest);
return AUTH_PREFIX + auth;
}
finally {
macInstance.close();
}
}
/**
* This API is a helper method to create auth header based on client request using resourceTokens.
*
* @param resourceTokens the resource tokens.
* @param path the path.
* @param resourceId the resource id.
* @return the authorization token.
*/
public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens,
String path,
String resourceId) {
if (resourceTokens == null) {
throw new IllegalArgumentException("resourceTokens");
}
String resourceToken = null;
if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) {
resourceToken = resourceTokens.get(resourceId);
} else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) {
if (resourceTokens.size() > 0) {
resourceToken = resourceTokens.values().iterator().next();
}
} else {
String[] pathParts = StringUtils.split(path, "/");
String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions",
"attachments", "media", "conflicts"};
HashSet<String> resourceTypesSet = new HashSet<String>();
Collections.addAll(resourceTypesSet, resourceTypes);
for (int i = pathParts.length - 1; i >= 0; --i) {
if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) {
resourceToken = resourceTokens.get(pathParts[i]);
}
}
}
return resourceToken;
}
private MacPool.ReUsableMac getReUseableMacInstance() {
reInitializeIfPossible();
return macPool.take();
}
/*
* Ensures that this.macInstance is initialized
* In-case of credential change, optimistically will try to refresh the macInstance
*
* Implementation is non-blocking, the one which acquire the lock will try to refresh
* with new credentials
*
* NOTE: Calling it CTOR ensured that default is initialized.
*/
private void reInitializeIfPossible() {
if (this.currentCredentialKey != this.credential.getKey()) {
boolean lockAcquired = this.macInstanceLock.tryLock();
if (lockAcquired) {
try {
if (this.currentCredentialKey != this.credential.getKey()) {
byte[] masterKeyBytes = this.credential.getKey().getBytes(StandardCharsets.UTF_8);
byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes);
SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256");
try {
Mac macInstance = Mac.getInstance("HMACSHA256");
macInstance.init(signingKey);
this.currentCredentialKey = this.credential.getKey();
this.macPool = new MacPool(macInstance);
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
throw new IllegalStateException(e);
}
}
} finally {
this.macInstanceLock.unlock();
}
}
}
}
} |
Yes, the .net PR returns the colls path for PartitionKey | private static String getResourceSegment(ResourceType resourceType) {
switch (resourceType) {
    // Cases are alphabetized; the switch maps each resource type to the canonical
    // path segment used when signing the request.
    case Attachment:
        return Paths.ATTACHMENTS_PATH_SEGMENT;
    case ClientEncryptionKey:
        return Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
    case Conflict:
        return Paths.CONFLICTS_PATH_SEGMENT;
    case Database:
        return Paths.DATABASES_PATH_SEGMENT;
    case Document:
        return Paths.DOCUMENTS_PATH_SEGMENT;
    // Partition-key requests are addressed (and signed) at collection scope.
    case DocumentCollection:
    case PartitionKey:
        return Paths.COLLECTIONS_PATH_SEGMENT;
    case Media:
        return Paths.MEDIA_PATH_SEGMENT;
    case Offer:
        return Paths.OFFERS_PATH_SEGMENT;
    case PartitionKeyRange:
        return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
    case Permission:
        return Paths.PERMISSIONS_PATH_SEGMENT;
    case StoredProcedure:
        return Paths.STORED_PROCEDURES_PATH_SEGMENT;
    case Trigger:
        return Paths.TRIGGERS_PATH_SEGMENT;
    case User:
        return Paths.USERS_PATH_SEGMENT;
    case UserDefinedFunction:
        return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    // Account-level and telemetry requests carry no resource segment.
    case DatabaseAccount:
    case ClientTelemetry:
        return "";
    default:
        // Unknown resource types yield null; callers treat this as "no segment".
        return null;
}
} | case PartitionKey: | private static String getResourceSegment(ResourceType resourceType) {
switch (resourceType) {
    // Cases are alphabetized; the switch maps each resource type to the canonical
    // path segment used when signing the request.
    case Attachment:
        return Paths.ATTACHMENTS_PATH_SEGMENT;
    case ClientEncryptionKey:
        return Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
    case Conflict:
        return Paths.CONFLICTS_PATH_SEGMENT;
    case Database:
        return Paths.DATABASES_PATH_SEGMENT;
    case Document:
        return Paths.DOCUMENTS_PATH_SEGMENT;
    // Partition-key requests are addressed (and signed) at collection scope.
    case DocumentCollection:
    case PartitionKey:
        return Paths.COLLECTIONS_PATH_SEGMENT;
    case Media:
        return Paths.MEDIA_PATH_SEGMENT;
    case Offer:
        return Paths.OFFERS_PATH_SEGMENT;
    case PartitionKeyRange:
        return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
    case Permission:
        return Paths.PERMISSIONS_PATH_SEGMENT;
    case StoredProcedure:
        return Paths.STORED_PROCEDURES_PATH_SEGMENT;
    case Trigger:
        return Paths.TRIGGERS_PATH_SEGMENT;
    case User:
        return Paths.USERS_PATH_SEGMENT;
    case UserDefinedFunction:
        return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    // Account-level and telemetry requests carry no resource segment.
    case DatabaseAccount:
    case ClientTelemetry:
        return "";
    default:
        // Unknown resource types yield null; callers treat this as "no segment".
        return null;
}
} | class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider {
private static final String AUTH_PREFIX = "type=master&ver=1.0&sig=";
private final AzureKeyCredential credential;
private volatile String currentCredentialKey;
private volatile MacPool macPool;
private final Lock macInstanceLock = new ReentrantLock();
public BaseAuthorizationTokenProvider(AzureKeyCredential credential) {
this.credential = credential;
reInitializeIfPossible();
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb.
* @param resourceIdOrFullName the resource id or full name
* @param resourceType the resource type.
* @param headers the request headers.
* @return the key authorization signature.
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
ResourceType resourceType,
Map<String, String> headers) {
return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName,
BaseAuthorizationTokenProvider.getResourceSegment(resourceType), headers);
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb
* @param resourceIdOrFullName the resource id or full name
* @param resourceSegment the resource segment
* @param headers the request headers
* @return the key authorization signature
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
String resourceSegment,
Map<String, String> headers) {
if (verb == null) {
throw new IllegalArgumentException("verb");
}
if (resourceIdOrFullName == null) {
resourceIdOrFullName = "";
}
if (resourceSegment == null) {
throw new IllegalArgumentException("resourceSegment");
}
if (headers == null) {
throw new IllegalArgumentException("headers");
}
if (StringUtils.isEmpty(this.credential.getKey())) {
throw new IllegalArgumentException("key credentials cannot be empty");
}
if(!PathsHelper.isNameBased(resourceIdOrFullName)) {
resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT);
}
StringBuilder body = new StringBuilder();
body.append(ModelBridgeInternal.toLower(verb))
.append('\n')
.append(resourceSegment)
.append('\n')
.append(resourceIdOrFullName)
.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
MacPool.ReUsableMac macInstance = getReUseableMacInstance();
try {
byte[] digest = macInstance.get().doFinal(body.toString().getBytes(StandardCharsets.UTF_8));
String auth = Utils.encodeBase64String(digest);
return AUTH_PREFIX + auth;
}
finally {
macInstance.close();
}
}
/**
* This API is a helper method to create auth header based on client request using resourceTokens.
*
* @param resourceTokens the resource tokens.
* @param path the path.
* @param resourceId the resource id.
* @return the authorization token.
*/
public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens,
String path,
String resourceId) {
if (resourceTokens == null) {
throw new IllegalArgumentException("resourceTokens");
}
String resourceToken = null;
if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) {
resourceToken = resourceTokens.get(resourceId);
} else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) {
if (resourceTokens.size() > 0) {
resourceToken = resourceTokens.values().iterator().next();
}
} else {
String[] pathParts = StringUtils.split(path, "/");
String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions",
"attachments", "media", "conflicts"};
HashSet<String> resourceTypesSet = new HashSet<String>();
Collections.addAll(resourceTypesSet, resourceTypes);
for (int i = pathParts.length - 1; i >= 0; --i) {
if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) {
resourceToken = resourceTokens.get(pathParts[i]);
}
}
}
return resourceToken;
}
private MacPool.ReUsableMac getReUseableMacInstance() {
reInitializeIfPossible();
return macPool.take();
}
/*
* Ensures that this.macInstance is initialized
* In-case of credential change, optimistically will try to refresh the macInstance
*
* Implementation is non-blocking, the one which acquire the lock will try to refresh
* with new credentials
*
* NOTE: Calling it CTOR ensured that default is initialized.
*/
private void reInitializeIfPossible() {
if (this.currentCredentialKey != this.credential.getKey()) {
boolean lockAcquired = this.macInstanceLock.tryLock();
if (lockAcquired) {
try {
if (this.currentCredentialKey != this.credential.getKey()) {
byte[] masterKeyBytes = this.credential.getKey().getBytes(StandardCharsets.UTF_8);
byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes);
SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256");
try {
Mac macInstance = Mac.getInstance("HMACSHA256");
macInstance.init(signingKey);
this.currentCredentialKey = this.credential.getKey();
this.macPool = new MacPool(macInstance);
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
throw new IllegalStateException(e);
}
}
} finally {
this.macInstanceLock.unlock();
}
}
}
}
} | class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider {
private static final String AUTH_PREFIX = "type=master&ver=1.0&sig=";
private final AzureKeyCredential credential;
private volatile String currentCredentialKey;
private volatile MacPool macPool;
private final Lock macInstanceLock = new ReentrantLock();
public BaseAuthorizationTokenProvider(AzureKeyCredential credential) {
this.credential = credential;
reInitializeIfPossible();
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb.
* @param resourceIdOrFullName the resource id or full name
* @param resourceType the resource type.
* @param headers the request headers.
* @return the key authorization signature.
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
ResourceType resourceType,
Map<String, String> headers) {
return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName,
BaseAuthorizationTokenProvider.getResourceSegment(resourceType), headers);
}
/**
* This API is a helper method to create auth header based on client request using masterkey.
*
* @param verb the verb
* @param resourceIdOrFullName the resource id or full name
* @param resourceSegment the resource segment
* @param headers the request headers
* @return the key authorization signature
*/
public String generateKeyAuthorizationSignature(RequestVerb verb,
String resourceIdOrFullName,
String resourceSegment,
Map<String, String> headers) {
if (verb == null) {
throw new IllegalArgumentException("verb");
}
if (resourceIdOrFullName == null) {
resourceIdOrFullName = "";
}
if (resourceSegment == null) {
throw new IllegalArgumentException("resourceSegment");
}
if (headers == null) {
throw new IllegalArgumentException("headers");
}
if (StringUtils.isEmpty(this.credential.getKey())) {
throw new IllegalArgumentException("key credentials cannot be empty");
}
if(!PathsHelper.isNameBased(resourceIdOrFullName)) {
resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT);
}
StringBuilder body = new StringBuilder();
body.append(ModelBridgeInternal.toLower(verb))
.append('\n')
.append(resourceSegment)
.append('\n')
.append(resourceIdOrFullName)
.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) {
body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase(Locale.ROOT));
}
body.append('\n');
MacPool.ReUsableMac macInstance = getReUseableMacInstance();
try {
byte[] digest = macInstance.get().doFinal(body.toString().getBytes(StandardCharsets.UTF_8));
String auth = Utils.encodeBase64String(digest);
return AUTH_PREFIX + auth;
}
finally {
macInstance.close();
}
}
/**
* This API is a helper method to create auth header based on client request using resourceTokens.
*
* @param resourceTokens the resource tokens.
* @param path the path.
* @param resourceId the resource id.
* @return the authorization token.
*/
public String getAuthorizationTokenUsingResourceTokens(Map<String, String> resourceTokens,
String path,
String resourceId) {
if (resourceTokens == null) {
throw new IllegalArgumentException("resourceTokens");
}
String resourceToken = null;
if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) {
resourceToken = resourceTokens.get(resourceId);
} else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) {
if (resourceTokens.size() > 0) {
resourceToken = resourceTokens.values().iterator().next();
}
} else {
String[] pathParts = StringUtils.split(path, "/");
String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions",
"attachments", "media", "conflicts"};
HashSet<String> resourceTypesSet = new HashSet<String>();
Collections.addAll(resourceTypesSet, resourceTypes);
for (int i = pathParts.length - 1; i >= 0; --i) {
if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) {
resourceToken = resourceTokens.get(pathParts[i]);
}
}
}
return resourceToken;
}
private MacPool.ReUsableMac getReUseableMacInstance() {
reInitializeIfPossible();
return macPool.take();
}
/*
* Ensures that this.macInstance is initialized
* In-case of credential change, optimistically will try to refresh the macInstance
*
* Implementation is non-blocking, the one which acquire the lock will try to refresh
* with new credentials
*
* NOTE: Calling it CTOR ensured that default is initialized.
*/
private void reInitializeIfPossible() {
if (this.currentCredentialKey != this.credential.getKey()) {
boolean lockAcquired = this.macInstanceLock.tryLock();
if (lockAcquired) {
try {
if (this.currentCredentialKey != this.credential.getKey()) {
byte[] masterKeyBytes = this.credential.getKey().getBytes(StandardCharsets.UTF_8);
byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(masterKeyBytes);
SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256");
try {
Mac macInstance = Mac.getInstance("HMACSHA256");
macInstance.init(signingKey);
this.currentCredentialKey = this.credential.getKey();
this.macPool = new MacPool(macInstance);
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
throw new IllegalStateException(e);
}
}
} finally {
this.macInstanceLock.unlock();
}
}
}
}
} |
Yes, according to the design doc, it has to go as POST. | private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
} | return this.performRequest(request, HttpMethod.POST); | private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
} | class RxGatewayStoreModel implements RxStoreModel {
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
private ThroughputControlStore throughputControlStore;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
// Create and Batch operations are sent as POST.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
// Partial-document patch uses the PATCH verb.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
// Upsert is sent as POST.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
// Point read.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
// Full-document replace uses PUT.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
// Stored-procedure execution is a POST.
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
// Feed reads use GET.
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
/**
 * Issues a query (or query-plan) request as a POST, after stamping the
 * query-specific headers: IS_QUERY for real queries, and the content type
 * dictated by the client's query compatibility mode.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    Map<String, String> requestHeaders = request.getHeaders();
    // Query-plan requests do not carry the IS_QUERY header.
    if (request.getOperationType() != OperationType.QueryPlan) {
        requestHeaders.put(HttpConstants.HttpHeaders.IS_QUERY, "true");
    }
    // Only SqlQuery mode uses the raw SQL media type; Default, Query and any
    // other mode fall through to the JSON query format (matches the original
    // switch's default arm).
    String contentType = this.queryCompatibilityMode == QueryCompatibilityMode.SqlQuery
        ? RuntimeConstants.MediaTypes.SQL
        : RuntimeConstants.MediaTypes.QUERY_JSON;
    requestHeaders.put(HttpConstants.HttpHeaders.CONTENT_TYPE, contentType);
    return this.performRequest(request, HttpMethod.POST);
}
/**
 * Resolves the target URI, initializes diagnostics if absent, and hands the
 * request to the HTTP pipeline — routed through throughput control when a
 * store has been configured.
 *
 * @param request the document service request
 * @param method the HTTP verb to use
 * @return a Mono emitting one RxDocumentServiceResponse, or an error
 */
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
    try {
        if (request.requestContext.cosmosDiagnostics == null) {
            request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
        }
        URI uri = getUri(request);
        request.requestContext.resourcePhysicalAddress = uri.toString();
        Mono<RxDocumentServiceResponse> responseMono = performRequestInternal(request, method, uri);
        return this.throughputControlStore == null
            ? responseMono
            : this.throughputControlStore.processRequest(request, responseMono);
    } catch (Exception e) {
        // Surface synchronous failures (e.g. URI resolution) through the reactive chain.
        return Mono.error(e);
    }
}
/**
 * Builds the HTTP request for the given service request and, upon subscription,
 * issues the call and emits a single RxDocumentServiceResponse.
 *
 * @param request the document service request to send
 * @param method the HTTP verb to use
 * @param requestUri the fully resolved target URI
 * @return a Mono emitting one RxDocumentServiceResponse (or an error)
 */
public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
// Query-plan and address-refresh calls get dedicated, separately configured response timeouts.
Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
if (OperationType.QueryPlan.equals(request.getOperationType())) {
responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
} else if (request.isAddressRefresh()) {
responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
// Surface synchronous request-construction failures through the reactive chain.
return Mono.error(e);
}
}
/**
 * Merges the client's default headers with the per-request headers.
 * Request headers win over defaults; a null header value is sent as an empty string.
 *
 * @param headers per-request headers; may be null or empty
 * @return the combined headers for the outgoing HTTP request
 */
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
    HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
    // Copy defaults first, skipping any key the request overrides.
    // FIX: the original dereferenced `headers` here unguarded, yet null-checked it
    // below — a null argument would have thrown NPE before reaching that check.
    for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
        if (headers == null || !headers.containsKey(entry.getKey())) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    if (headers != null) {
        for (Entry<String, String> entry : headers.entrySet()) {
            // HttpHeaders rejects nulls, so a null value is sent as the empty string.
            httpHeaders.set(entry.getKey(), entry.getValue() == null ? "" : entry.getValue());
        }
    }
    return httpHeaders;
}
/**
 * Resolves the absolute HTTPS URI for the request: an explicit endpoint override
 * wins; media requests pin to the first write endpoint; everything else goes
 * through the global endpoint manager. DatabaseAccount requests target the
 * service root (empty path).
 *
 * @throws URISyntaxException if the assembled URI is malformed
 */
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
/**
 * Returns the given path guaranteed to start with '/'.
 * A null path, or one already slash-prefixed, is returned unchanged.
 */
private String ensureSlashPrefixed(String path) {
    if (path == null || path.startsWith("/")) {
        return path;
    }
    return "/" + path;
}
/**
 * Transforms reactor-netty's HttpResponse Mono into an RxDocumentServiceResponse Mono.
 *
 * The HTTP invocation only happens once the returned Mono is subscribed to; the
 * subscription propagates down to the underlying reactor-netty source.
 *
 * @param httpResponseMono the pending HTTP response
 * @param request the originating service request (enriches diagnostics and errors)
 * @param httpRequest the outgoing HTTP request (source of the timeline snapshot)
 * @return a Mono emitting one RxDocumentServiceResponse, or a CosmosException on failure
 */
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                  RxDocumentServiceRequest request,
                                                                  HttpRequest httpRequest) {
    return httpResponseMono.flatMap(httpResponse -> {
        HttpHeaders httpResponseHeaders = httpResponse.headers();
        int httpResponseStatus = httpResponse.statusCode();
        // Normalize an empty body to an empty byte array so downstream code never sees null.
        Mono<byte[]> contentObservable = httpResponse
            .bodyAsByteArray()
            .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
        return contentObservable
            .map(content -> {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                if (reactorNettyRequestRecord != null) {
                    reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                    BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                        reactorNettyRequestRecord.takeTimelineSnapshot());
                }
                // Throws a CosmosException for gateway error status codes.
                validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                StoreResponse rsp = new StoreResponse(httpResponseStatus,
                    HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                    content);
                // BUGFIX: reactorNettyRequestRecord can be null (see the guard above); the
                // original dereferenced it unconditionally here, risking a NullPointerException.
                if (reactorNettyRequestRecord != null) {
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                    DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                }
                return rsp;
            })
            .single();
    }).map(rsp -> {
        // Attach the request timeline snapshot when the transport recorded one.
        if (httpRequest.reactorNettyRequestRecord() != null) {
            return new RxDocumentServiceResponse(this.clientContext, rsp,
                httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
        } else {
            return new RxDocumentServiceResponse(this.clientContext, rsp);
        }
    }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (!(unwrappedException instanceof Exception)) {
            // Throwables that are not Exceptions (e.g. Errors) are propagated untranslated.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap transport-level failures so callers always observe CosmosExceptions.
            logger.error("Network failure", exception);
            dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            // Backfill the gateway timeline if nothing recorded it before the failure.
            if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) {
                BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                    httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
            }
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
            BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
        }
        return Mono.error(dce);
    });
}
/**
 * Throws a CosmosException when the gateway responded with an error status code
 * (>= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY); otherwise returns normally.
 *
 * @param request the originating request (its physical address and headers enrich the exception)
 * @param status the HTTP response status
 * @param headers the HTTP response headers, propagated onto the exception
 * @param bodyAsBytes the raw response body; parsed as a CosmosError when non-empty
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        String statusCodeString = status.reasonPhrase() != null
            ? status.reasonPhrase().replace(" ", "")
            : "";
        // FIX: decode explicitly as UTF-8; the bare new String(byte[]) overload uses the
        // platform default charset and can garble the error payload on non-UTF-8 systems.
        String body = bodyAsBytes != null
            ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
            : null;
        CosmosError cosmosError;
        cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
        // Re-wrap so the message always carries the status code alongside the server message.
        cosmosError = new CosmosError(statusCodeString,
            String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
            cosmosError.getPartitionedQueryExecutionInfo());
        CosmosException dce = BridgeInternal.createCosmosException(
            request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
        BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        throw dce;
    }
}
/**
 * Dispatches the request to the HTTP verb handler matching its operation type.
 *
 * @throws IllegalStateException for operation types the gateway model does not handle
 */
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
switch (request.getOperationType()) {
case Create:
case Batch:
return this.create(request);
case Patch:
return this.patch(request);
case Upsert:
return this.upsert(request);
case Delete:
// Delete-by-partition-key is routed separately (it is sent as POST, see deleteByPartitionKey).
if (request.getResourceType() == ResourceType.PartitionKey) {
return this.deleteByPartitionKey(request);
}
return this.delete(request);
case ExecuteJavaScript:
return this.execute(request);
case Read:
return this.read(request);
case ReadFeed:
return this.readFeed(request);
case Replace:
return this.replace(request);
case SqlQuery:
case Query:
case QueryPlan:
return this.query(request);
default:
throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
}
}
/**
 * Executes the request with web-exception retry handling; .single() asserts that
 * exactly one response is emitted per attempt.
 */
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics)));
}
/**
 * Sends the request through the gateway pipeline: applies the session token,
 * invokes with retries, then captures the session token from the outcome.
 *
 * On failure, the session token is still captured for non-master resources on
 * 412 (precondition failed), 409 (conflict), and 404s that are not
 * read-session-unavailable, keeping the local session container consistent.
 */
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
this.applySessionToken(request);
Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
return responseObs.onErrorResume(
e -> {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null) {
// Non-Cosmos failures are propagated untouched.
logger.error("unexpected failure {}", e.getMessage(), e);
return Mono.error(e);
}
if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
(dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
(
dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
!Exceptions.isSubStatusCode(dce,
HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
this.captureSessionToken(request, dce.getResponseHeaders());
}
// Throughput-control 429s are recorded in diagnostics before being re-thrown.
if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
}
).map(response ->
{
this.captureSessionToken(request, response.getResponseHeaders());
return response;
}
);
}
/**
 * Enables client-side throughput control for requests routed through this store model.
 *
 * @param throughputControlStore the store that throttles outgoing requests
 */
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
    // BUGFIX: the original body was empty, so this.throughputControlStore stayed null
    // forever and the throughput-control branch in performRequest() was dead code.
    this.throughputControlStore = throughputControlStore;
}
/**
 * Records (or clears) the session token observed on a response.
 * Deleting a document collection invalidates its cached tokens; every other
 * response simply updates the session container.
 */
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
    boolean isCollectionDelete = request.getResourceType() == ResourceType.DocumentCollection
        && request.getOperationType() == OperationType.Delete;
    if (!isCollectionDelete) {
        this.sessionContainer.setSessionToken(request, responseHeaders);
        return;
    }
    // Name-based requests read the resource id from the owner-id response header;
    // rid-based requests already carry it.
    String resourceId = request.getIsNameBased()
        ? responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID)
        : request.getResourceId();
    this.sessionContainer.clearTokenByResourceId(resourceId);
}
/**
 * Populates (or strips) the session-token request header.
 *
 * A session token applies when the request explicitly asks for SESSION
 * consistency, or the client default is SESSION and the request is not an
 * eventual-consistency document read. Master operations never carry one.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
Map<String, String> headers = request.getHeaders();
Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
boolean sessionTokenApplicable =
Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
(this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
(!request.isReadOnlyRequest() ||
request.getResourceType() != ResourceType.Document ||
!Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
// Caller supplied a token: drop it when not applicable, otherwise keep it untouched.
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return;
}
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
return;
}
// No caller-supplied token: resolve one from the session container.
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
}
/**
 * True when the operation targets master (control-plane) resources, is a
 * non-execute stored-procedure operation, or is a query-plan request — all of
 * which bypass session-token handling.
 */
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
isStoredProcedureMasterOperation(resourceType, operationType) ||
operationType == OperationType.QueryPlan;
}
// Stored-procedure CRUD counts as a master operation; executing one (ExecuteJavaScript) does not.
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
}
} | class RxGatewayStoreModel implements RxStoreModel {
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
private ThroughputControlStore throughputControlStore;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
if(request.getOperationType() != OperationType.QueryPlan) {
request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
}
switch (this.queryCompatibilityMode) {
case SqlQuery:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.SQL);
break;
case Default:
case Query:
default:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.QUERY_JSON);
break;
}
return this.performRequest(request, HttpMethod.POST);
}
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
* Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
*
* @param request
* @param method
* @param requestUri
* @return Flux<RxDocumentServiceResponse>
*/
public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
if (OperationType.QueryPlan.equals(request.getOperationType())) {
responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
} else if (request.isAddressRefresh()) {
responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
 * Merges the client's default headers with the per-request headers.
 * Request headers win over defaults; a null header value is sent as an empty string.
 *
 * @param headers per-request headers; may be null or empty
 * @return the combined headers for the outgoing HTTP request
 */
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
    HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
    // Copy defaults first, skipping any key the request overrides.
    // FIX: the original dereferenced `headers` here unguarded, yet null-checked it
    // below — a null argument would have thrown NPE before reaching that check.
    for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
        if (headers == null || !headers.containsKey(entry.getKey())) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    if (headers != null) {
        for (Entry<String, String> entry : headers.entrySet()) {
            // HttpHeaders rejects nulls, so a null value is sent as the empty string.
            httpHeaders.set(entry.getKey(), entry.getValue() == null ? "" : entry.getValue());
        }
    }
    return httpHeaders;
}
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
private String ensureSlashPrefixed(String path) {
if (path == null) {
return null;
}
if (path.startsWith("/")) {
return path;
}
return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                  RxDocumentServiceRequest request,
                                                                  HttpRequest httpRequest) {
    return httpResponseMono.flatMap(httpResponse -> {
        HttpHeaders httpResponseHeaders = httpResponse.headers();
        int httpResponseStatus = httpResponse.statusCode();
        // Normalize an empty body to an empty byte array so downstream code never sees null.
        Mono<byte[]> contentObservable = httpResponse
            .bodyAsByteArray()
            .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
        return contentObservable
            .map(content -> {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                if (reactorNettyRequestRecord != null) {
                    reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                    BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                        reactorNettyRequestRecord.takeTimelineSnapshot());
                }
                // Throws a CosmosException for gateway error status codes.
                validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                StoreResponse rsp = new StoreResponse(httpResponseStatus,
                    HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                    content);
                // BUGFIX: reactorNettyRequestRecord can be null (see the guard above); the
                // original dereferenced it unconditionally here, risking a NullPointerException.
                if (reactorNettyRequestRecord != null) {
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                    DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                }
                return rsp;
            })
            .single();
    }).map(rsp -> {
        // Attach the request timeline snapshot when the transport recorded one.
        if (httpRequest.reactorNettyRequestRecord() != null) {
            return new RxDocumentServiceResponse(this.clientContext, rsp,
                httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
        } else {
            return new RxDocumentServiceResponse(this.clientContext, rsp);
        }
    }).onErrorResume(throwable -> {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (!(unwrappedException instanceof Exception)) {
            // Throwables that are not Exceptions (e.g. Errors) are propagated untranslated.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            // Wrap transport-level failures so callers always observe CosmosExceptions.
            logger.error("Network failure", exception);
            dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            // Backfill the gateway timeline if nothing recorded it before the failure.
            if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) {
                BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                    httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
            }
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
            BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
        }
        return Mono.error(dce);
    });
}
/**
 * Throws a CosmosException when the gateway responded with an error status code
 * (>= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY); otherwise returns normally.
 *
 * @param request the originating request (its physical address and headers enrich the exception)
 * @param status the HTTP response status
 * @param headers the HTTP response headers, propagated onto the exception
 * @param bodyAsBytes the raw response body; parsed as a CosmosError when non-empty
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        String statusCodeString = status.reasonPhrase() != null
            ? status.reasonPhrase().replace(" ", "")
            : "";
        // FIX: decode explicitly as UTF-8; the bare new String(byte[]) overload uses the
        // platform default charset and can garble the error payload on non-UTF-8 systems.
        String body = bodyAsBytes != null
            ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
            : null;
        CosmosError cosmosError;
        cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
        // Re-wrap so the message always carries the status code alongside the server message.
        cosmosError = new CosmosError(statusCodeString,
            String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
            cosmosError.getPartitionedQueryExecutionInfo());
        CosmosException dce = BridgeInternal.createCosmosException(
            request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
        BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        throw dce;
    }
}
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
switch (request.getOperationType()) {
case Create:
case Batch:
return this.create(request);
case Patch:
return this.patch(request);
case Upsert:
return this.upsert(request);
case Delete:
if (request.getResourceType() == ResourceType.PartitionKey) {
return this.deleteByPartitionKey(request);
}
return this.delete(request);
case ExecuteJavaScript:
return this.execute(request);
case Read:
return this.read(request);
case ReadFeed:
return this.readFeed(request);
case Replace:
return this.replace(request);
case SqlQuery:
case Query:
case QueryPlan:
return this.query(request);
default:
throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
}
}
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics)));
}
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
this.applySessionToken(request);
Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
return responseObs.onErrorResume(
e -> {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure {}", e.getMessage(), e);
return Mono.error(e);
}
if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
(dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
(
dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
!Exceptions.isSubStatusCode(dce,
HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
this.captureSessionToken(request, dce.getResponseHeaders());
}
if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
}
).map(response ->
{
this.captureSessionToken(request, response.getResponseHeaders());
return response;
}
);
}
/**
 * Enables client-side throughput control for requests routed through this store model.
 *
 * @param throughputControlStore the store that throttles outgoing requests
 */
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
    // BUGFIX: the original body was empty, so this.throughputControlStore stayed null
    // forever and the throughput-control branch in performRequest() was dead code.
    this.throughputControlStore = throughputControlStore;
}
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
if (request.getResourceType() == ResourceType.DocumentCollection &&
request.getOperationType() == OperationType.Delete) {
String resourceId;
if (request.getIsNameBased()) {
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
} else {
resourceId = request.getResourceId();
}
this.sessionContainer.clearTokenByResourceId(resourceId);
} else {
this.sessionContainer.setSessionToken(request, responseHeaders);
}
}
private void applySessionToken(RxDocumentServiceRequest request) {
Map<String, String> headers = request.getHeaders();
Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
boolean sessionTokenApplicable =
Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
(this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
(!request.isReadOnlyRequest() ||
request.getResourceType() != ResourceType.Document ||
!Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return;
}
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
return;
}
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
}
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
isStoredProcedureMasterOperation(resourceType, operationType) ||
operationType == OperationType.QueryPlan;
}
// Every stored-procedure operation except script execution is a master operation.
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (resourceType != ResourceType.StoredProcedure) {
        return false;
    }
    return operationType != OperationType.ExecuteJavaScript;
}
} |
// Builds the request path for a name-based resource link. Note the ordering:
// ResourceType.PartitionKey always arrives with isFeed == false, so the
// PartitionKey + Delete case must be tested BEFORE the generic "!isFeed"
// branch (mirrors the .NET SDK implementation).
// NOTE(review): the next line is review-note prose fused with the method
// signature by the extraction that produced this file; left byte-identical.
For the ResourceType PartitionKey we always get isFeed as False, so if we keep it on the top it won't go in the below PartitionKey logic. This is the same that has been implemented in .Net PR as well. | private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
// A feed request needs an owner full name for every type except Database.
if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
throw new IllegalArgumentException(errorMessage);
}
String resourcePath = null;
// PartitionKey delete routes through the operations path under the collection.
if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
} else if (!isFeed) {
// Single-resource requests use the full name as-is.
resourcePath = resourceFullName;
} else if (resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT;
} else if (resourceType == ResourceType.DocumentCollection) {
resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.StoredProcedure) {
resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
} else if (resourceType == ResourceType.UserDefinedFunction) {
resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Trigger) {
resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Conflict) {
resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Attachment) {
resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.User) {
resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Permission) {
resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Document) {
resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Offer) {
return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.PartitionKeyRange) {
return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
} else if (resourceType == ResourceType.Schema) {
resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
} else if (resourceType == ResourceType.ClientEncryptionKey) {
resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
} else {
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
assert false : errorMessage;
throw new IllegalArgumentException(errorMessage);
}
return resourcePath;
} | } else if (!isFeed) { | private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
// Second, byte-identical copy of generatePathForNameBased; its signature is
// fused onto the end of the preceding line by the extraction that produced
// this file. The PartitionKey + Delete case precedes the generic "!isFeed"
// branch because PartitionKey requests always carry isFeed == false.
if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
throw new IllegalArgumentException(errorMessage);
}
String resourcePath = null;
if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
} else if (!isFeed) {
// Single-resource requests use the full name as-is.
resourcePath = resourceFullName;
} else if (resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT;
} else if (resourceType == ResourceType.DocumentCollection) {
resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.StoredProcedure) {
resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
} else if (resourceType == ResourceType.UserDefinedFunction) {
resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Trigger) {
resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Conflict) {
resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Attachment) {
resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.User) {
resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Permission) {
resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Document) {
resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Offer) {
return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.PartitionKeyRange) {
return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
} else if (resourceType == ResourceType.Schema) {
resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
} else if (resourceType == ResourceType.ClientEncryptionKey) {
resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
} else {
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
assert false : errorMessage;
throw new IllegalArgumentException(errorMessage);
}
return resourcePath;
} | class PathsHelper {
private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);
// Routes path generation to the name-based or rid-based builder depending on
// how the request addresses its resource.
public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
    return request.getIsNameBased()
        ? generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType())
        : generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
}
// Resolves the name-based link for a child resource given its parent's full
// name. Returns null when the name (or required owner) is missing, or when the
// concrete Resource subtype has no name-based addressing scheme.
public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
    if (resourceName == null) {
        return null;
    }
    // Databases are rooted directly under "dbs" and need no owner.
    if (resourceType instanceof Database) {
        return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
    }
    if (resourceOwnerFullName == null) {
        return null;
    }
    String typeSegment;
    if (resourceType instanceof DocumentCollection) {
        typeSegment = Paths.COLLECTIONS_PATH_SEGMENT;
    } else if (resourceType instanceof StoredProcedure) {
        typeSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
    } else if (resourceType instanceof UserDefinedFunction) {
        typeSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    } else if (resourceType instanceof Trigger) {
        typeSegment = Paths.TRIGGERS_PATH_SEGMENT;
    } else if (resourceType instanceof Conflict) {
        typeSegment = Paths.CONFLICTS_PATH_SEGMENT;
    } else if (resourceType instanceof User) {
        typeSegment = Paths.USERS_PATH_SEGMENT;
    } else if (resourceType instanceof Permission) {
        typeSegment = Paths.PERMISSIONS_PATH_SEGMENT;
    } else if (resourceType instanceof Document) {
        typeSegment = Paths.DOCUMENTS_PATH_SEGMENT;
    } else if (resourceType instanceof Offer) {
        // Offers are addressed from the root, not under their owner.
        return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Resource) {
        // Other concrete Resource subtypes have no name-based path.
        return null;
    } else {
        String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
        assert false : errorMessage;
        throw new IllegalArgumentException(errorMessage);
    }
    return resourceOwnerFullName + "/" + typeSegment + "/" + resourceName;
}
/**
 * Builds the rid-based request path for the given resource type.
 *
 * @param ownerOrResourceId the resource rid (or, for feed requests, the owner's rid);
 *                          may be null only for the root-scoped types listed in the guard
 * @param isFeed            whether the path addresses a feed (collection of resources)
 *                          rather than a single resource
 * @param operationType     consulted only to recognize the PartitionKey delete path
 * @return the request path
 * @throws IllegalStateException when the type / feed combination is unsupported
 */
public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
// Feed requests need an owner rid, except for the root-scoped types below.
if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) &&
resourceType != ResourceType.Database &&
resourceType != ResourceType.Offer &&
resourceType != ResourceType.MasterPartition &&
resourceType != ResourceType.ServerPartition &&
resourceType != ResourceType.DatabaseAccount &&
resourceType != ResourceType.Topology) {
throw new IllegalStateException("INVALID resource type");
}
if(ownerOrResourceId == null) {
ownerOrResourceId = StringUtils.EMPTY;
}
// Each pair of branches below handles the feed path (".../colls") and the
// single-resource path (".../colls/{rid}") for one resource type; the rid is
// parsed so that ancestor rids (database, collection, document) can be
// extracted from it.
if (isFeed && resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT;
} else if (resourceType == ResourceType.Database) {
return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.DocumentCollection) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.DocumentCollection) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
} else if (isFeed && resourceType == ResourceType.Offer) {
return Paths.OFFERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Offer) {
return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.StoredProcedure) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.STORED_PROCEDURES_PATH_SEGMENT;
} else if (resourceType == ResourceType.StoredProcedure) {
ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" +
Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
} else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.UserDefinedFunction) {
ResourceId functionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" +
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
} else if (isFeed && resourceType == ResourceType.Trigger) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.TRIGGERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Trigger) {
ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" +
Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
} else if (isFeed && resourceType == ResourceType.Conflict) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.CONFLICTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Conflict) {
ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" +
Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
} else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" +
documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
} else if (resourceType == ResourceType.PartitionKeyRange) {
ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" +
Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
} else if (isFeed && resourceType == ResourceType.Attachment) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" +
Paths.ATTACHMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Attachment) {
ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" +
Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
} else if (isFeed && resourceType == ResourceType.User) {
return
Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" +
Paths.USERS_PATH_SEGMENT;
} else if (resourceType == ResourceType.User) {
ResourceId userId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
} else if (isFeed && resourceType == ResourceType.Permission) {
ResourceId userId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" +
Paths.PERMISSIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Permission) {
ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" +
Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" +
Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
} else if (isFeed && resourceType == ResourceType.Document) {
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return
Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT;
} else if (resourceType == ResourceType.Document) {
ResourceId documentId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" +
Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
} else if (isFeed && resourceType == ResourceType.MasterPartition) {
return Paths.PARTITIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.MasterPartition) {
return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.ServerPartition) {
return Paths.PARTITIONS_PATH_SEGMENT;
} else if (resourceType == ResourceType.ServerPartition) {
return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.Topology) {
return Paths.TOPOLOGY_PATH_SEGMENT;
} else if (resourceType == ResourceType.Topology) {
return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
} else if (resourceType == ResourceType.DatabaseAccount) {
return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
} else if (resourceType == ResourceType.ClientEncryptionKey) {
ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" +
Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
} else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
// PartitionKey delete routes through the operations path under the collection.
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
}
String errorMessage = "invalid resource type";
throw new IllegalStateException(errorMessage);
}
// Parses a resource url into a PathInfo, falling back to name-based parsing
// when the second segment does not parse as a database rid.
public static PathInfo parsePathSegments(String resourceUrl) {
    String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
    if (segments == null || segments.length < 1) {
        return null;
    }
    int count = segments.length;
    String last = StringUtils.strip(segments[count - 1], "/");
    String secondLast = (count >= 2) ? StringUtils.strip(segments[count - 2], "/") : StringUtils.EMPTY;

    // Urls that don't start with a fixed root segment and whose second segment
    // is not a database rid must be name-based links.
    if (count >= 2
        && !Paths.MEDIA_PATH_SEGMENT.equals(segments[0])
        && !Paths.OFFERS_PATH_SEGMENT.equals(segments[0])
        && !Paths.PARTITIONS_PATH_SEGMENT.equals(segments[0])
        && !Paths.DATABASE_ACCOUNT_PATH_SEGMENT.equals(segments[0])) {
        Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(segments[1]);
        if (!parsed.getLeft() || !parsed.getRight().isDatabaseId()) {
            return parseNameSegments(resourceUrl, segments);
        }
    }

    // An odd segment count ending in a resource-type keyword is a feed request.
    if ((count % 2 != 0) && isResourceType(last)) {
        String ownerId = last.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT) ? StringUtils.EMPTY : secondLast;
        return new PathInfo(true, last, ownerId, false);
    }
    if (isResourceType(secondLast)) {
        return new PathInfo(false, secondLast, last, false);
    }
    return null;
}
/**
 * Parses a resource url (rid-based or name-based) and fills the supplied
 * {@link PathInfo} with the resource path, the resource id or full name, and
 * whether the url addresses a feed.
 *
 * @param resourceUrl   complete resource link to parse
 * @param pathInfo      output object populated on success (always reset first)
 * @param clientVersion the client version (currently unused; retained for API compatibility)
 * @return {@code true} when the url was recognized and {@code pathInfo} was populated,
 *         {@code false} otherwise
 */
public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
    pathInfo.resourcePath = StringUtils.EMPTY;
    pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
    pathInfo.isFeed = false;
    pathInfo.isNameBased = false;
    if (StringUtils.isEmpty(resourceUrl)) {
        return false;
    }
    String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
    String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
    if (segments == null || segments.length < 1) {
        return false;
    }
    int uriSegmentsCount = segments.length;
    String segmentOne = segments[uriSegmentsCount - 1];
    String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
    // A url that doesn't start with one of the fixed top-level segments and
    // whose second segment is not a database rid must be name-based.
    if (uriSegmentsCount >= 2) {
        if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
            Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
            if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                pathInfo.isNameBased = true;
                return tryParseNameSegments(resourceUrl, segments, pathInfo);
            }
        }
    }
    if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
        // Odd segment count ending in a resource-type keyword => feed request.
        pathInfo.isFeed = true;
        pathInfo.resourcePath = segmentOne;
        if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
            pathInfo.resourceIdOrFullName = segmentTwo;
        }
    } else if (PathsHelper.isResourceType(segmentTwo)) {
        pathInfo.isFeed = false;
        pathInfo.resourcePath = segmentTwo;
        pathInfo.resourceIdOrFullName = segmentOne;
        // NOTE: an earlier revision declared unused media-attachment locals here
        // for Paths.MEDIA_PATH_SEGMENT urls; that dead code has been removed.
    } else {
        return false;
    }
    return true;
}
/**
 * Attempts to interpret the given pre-split segments as a name-based link and,
 * on success, fills {@code pathInfo} with the parsed values.
 *
 * @param resourceUrl complete resource link
 * @param segments    pre-split path segments of {@code resourceUrl}
 * @param pathInfo    output object populated on success (always reset first)
 * @return whether the segments formed a recognizable name-based link
 */
private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
    pathInfo.isFeed = false;
    pathInfo.resourceIdOrFullName = "";
    pathInfo.resourcePath = "";
    if (segments == null || segments.length < 1) {
        return false;
    }
    boolean evenCount = segments.length % 2 == 0;
    if (evenCount && isResourceType(segments[segments.length - 2])) {
        // Even count: ".../{resourceType}/{resourceName}" — a single resource.
        pathInfo.resourcePath = segments[segments.length - 2];
        pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
        return true;
    }
    if (!evenCount && isResourceType(segments[segments.length - 1])) {
        // Odd count ending in a resource type — a feed; the owner's full name
        // is everything before the final segment.
        pathInfo.isFeed = true;
        pathInfo.resourcePath = segments[segments.length - 1];
        String ownerFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT));
        pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(ownerFullName);
        return true;
    }
    return false;
}
// Name-based counterpart of parsePathSegments: returns a PathInfo for the given
// pre-split segments, or null when they do not form a name-based link.
public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
    if (segments == null || segments.length < 1) {
        return null;
    }
    int len = segments.length;
    if (len % 2 == 0) {
        // ".../{resourceType}/{resourceName}" — a single resource.
        if (isResourceType(segments[len - 2])) {
            return new PathInfo(false, segments[len - 2], unescapeJavaAndTrim(resourceUrl), true);
        }
        return null;
    }
    // Odd count ending in a resource type — a feed over the owner's children.
    if (isResourceType(segments[len - 1])) {
        String ownerFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
        return new PathInfo(true, segments[len - 1], unescapeJavaAndTrim(ownerFullName), true);
    }
    return null;
}
// Strips leading/trailing path separators and, when the remaining text contains
// an escape character, Java-unescapes it. Avoids allocating when no trimming or
// unescaping is actually needed.
public static String unescapeJavaAndTrim(String resourceUrl) {
    if (resourceUrl == null) {
        return null;
    }
    int start = 0;
    while (start < resourceUrl.length() && resourceUrl.charAt(start) == Paths.ROOT_CHAR) {
        start++;
    }
    if (start == resourceUrl.length()) {
        // Nothing but separators.
        return "";
    }
    int end = resourceUrl.length();
    while (end > start && resourceUrl.charAt(end - 1) == Paths.ROOT_CHAR) {
        end--;
    }
    for (int i = start; i < end; i++) {
        if (resourceUrl.charAt(i) == Paths.ESCAPE_CHAR) {
            // Escapes present: fall back to a full unescape of the stripped url.
            return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
        }
    }
    return (start == 0 && end == resourceUrl.length()) ? resourceUrl : resourceUrl.substring(start, end);
}
/**
 * Returns whether the given path segment (compared case-insensitively) is one
 * of the fixed resource-type segments such as "dbs", "colls" or "docs".
 */
private static boolean isResourceType(String resourcePathSegment) {
if (StringUtils.isEmpty(resourcePathSegment)) {
return false;
}
// Locale.ROOT keeps the lower-casing locale-independent.
switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
case Paths.ATTACHMENTS_PATH_SEGMENT:
case Paths.COLLECTIONS_PATH_SEGMENT:
case Paths.DATABASES_PATH_SEGMENT:
case Paths.PERMISSIONS_PATH_SEGMENT:
case Paths.USERS_PATH_SEGMENT:
case Paths.DOCUMENTS_PATH_SEGMENT:
case Paths.STORED_PROCEDURES_PATH_SEGMENT:
case Paths.TRIGGERS_PATH_SEGMENT:
case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
case Paths.CONFLICTS_PATH_SEGMENT:
case Paths.MEDIA_PATH_SEGMENT:
case Paths.OFFERS_PATH_SEGMENT:
case Paths.PARTITIONS_PATH_SEGMENT:
case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
case Paths.TOPOLOGY_PATH_SEGMENT:
case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
case Paths.SCHEMAS_PATH_SEGMENT:
case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
return true;
default:
return false;
}
}
// Builds "{owner}/{typeSegment}/{name}" (or "dbs/{name}" for databases) for the
// given resource type; returns null for types with no name-based child path.
public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
    if (resourceType == ResourceType.Database) {
        return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
    }
    String typeSegment;
    switch (resourceType) {
        case DocumentCollection:
            typeSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            typeSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            typeSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Trigger:
            typeSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case Attachment:
            typeSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case Conflict:
            typeSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        case Document:
            typeSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case Offer:
            // NOTE(review): this overload prefixes offers with the owner's full
            // name, while the Resource-typed overload addresses offers from the
            // root — confirm the asymmetry is intended.
            typeSegment = Paths.OFFERS_PATH_SEGMENT;
            break;
        case Permission:
            typeSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case User:
            typeSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case PartitionKeyRange:
            typeSegment = Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            break;
        default:
            return null;
    }
    return resourceOwnerFullName + "/" + typeSegment + "/" + resourceName;
}
// Truncates a resource full name to its collection prefix ("dbs/{db}/colls/{coll}"),
// i.e. the first four path segments of the slash-trimmed name.
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    int cut = indexOfNth(trimmed, '/', 4);
    return cut > 0 ? trimmed.substring(0, cut) : resourceFullName;
}
// Truncates a resource full name to its database prefix ("dbs/{db}"),
// i.e. the first two path segments.
public static String getDatabasePath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    int cut = indexOfNth(resourceFullName, '/', 2);
    return cut > 0 ? resourceFullName.substring(0, cut) : resourceFullName;
}
// Returns the first {segmentIndex} segments of the full name. When the name has
// fewer separators it is returned whole only if it still contains
// segmentIndex - 1 separators; otherwise null.
public static String getParentByIndex(String resourceFullName, int segmentIndex) {
    int cut = indexOfNth(resourceFullName, '/', segmentIndex);
    if (cut > 0) {
        return resourceFullName.substring(0, cut);
    }
    return indexOfNth(resourceFullName, '/', segmentIndex - 1) > 0 ? resourceFullName : null;
}
/**
 * Returns whether the given link is name-based: name-based links start with a
 * segment such as {@code "dbs/"}, putting a '/' at offset 3.
 *
 * @param resourceIdOrFullName resource id or full name; may be null
 * @return {@code true} when the value looks like a name-based link
 */
public static boolean isNameBased(String resourceIdOrFullName) {
    // length() > 4 already implies non-null/non-empty, so the charAt(3) probe is safe;
    // the original's redundant isEmpty() check is dropped.
    return resourceIdOrFullName != null
        && resourceIdOrFullName.length() > 4
        && resourceIdOrFullName.charAt(3) == '/';
}
// Returns the index of the nth occurrence of {@code value} in {@code str}, or
// -1 when there are fewer than n occurrences (including when n <= 0).
private static int indexOfNth(String str, char value, int nthOccurance) {
    int index = -1;
    for (int remaining = nthOccurance; remaining > 0; remaining--) {
        index = str.indexOf(value, index + 1);
        if (index < 0) {
            return -1;
        }
    }
    return index;
}
/**
 * Maps a path segment such as "colls" or "docs" to its {@link ResourceType}.
 *
 * @param resourcePathSegment the segment to map; must be non-empty
 * @return the matching resource type
 * @throws BadRequestException when the segment is empty or unrecognized
 */
public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
if (StringUtils.isEmpty(resourcePathSegment)) {
String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
throw new BadRequestException(message);
}
switch (resourcePathSegment) {
case Paths.ATTACHMENTS_PATH_SEGMENT:
return ResourceType.Attachment;
case Paths.COLLECTIONS_PATH_SEGMENT:
return ResourceType.DocumentCollection;
case Paths.DATABASES_PATH_SEGMENT:
return ResourceType.Database;
case Paths.PERMISSIONS_PATH_SEGMENT:
return ResourceType.Permission;
case Paths.USERS_PATH_SEGMENT:
return ResourceType.User;
case Paths.DOCUMENTS_PATH_SEGMENT:
return ResourceType.Document;
case Paths.STORED_PROCEDURES_PATH_SEGMENT:
return ResourceType.StoredProcedure;
case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
return ResourceType.UserDefinedFunction;
case Paths.TRIGGERS_PATH_SEGMENT:
return ResourceType.Trigger;
case Paths.CONFLICTS_PATH_SEGMENT:
return ResourceType.Conflict;
case Paths.OFFERS_PATH_SEGMENT:
return ResourceType.Offer;
case Paths.SCHEMAS_PATH_SEGMENT:
return ResourceType.Schema;
}
// Fall-through: the segment matched no known resource type.
String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
throw new BadRequestException(errorMessage);
}
/**
 * Inverse of {@code getResourcePathSegment}: maps a {@link ResourceType} to its
 * path segment (e.g. Document -> "docs").
 *
 * @throws BadRequestException when the type has no path segment
 */
public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
switch (resourceType) {
case Database:
return Paths.DATABASES_PATH_SEGMENT;
case DocumentCollection:
return Paths.COLLECTIONS_PATH_SEGMENT;
case Document:
return Paths.DOCUMENTS_PATH_SEGMENT;
case StoredProcedure:
return Paths.STORED_PROCEDURES_PATH_SEGMENT;
case UserDefinedFunction:
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
case Trigger:
return Paths.TRIGGERS_PATH_SEGMENT;
case Conflict:
return Paths.CONFLICTS_PATH_SEGMENT;
case Attachment:
return Paths.ATTACHMENTS_PATH_SEGMENT;
case User:
return Paths.USERS_PATH_SEGMENT;
case Permission:
return Paths.PERMISSIONS_PATH_SEGMENT;
case Offer:
return Paths.OFFERS_PATH_SEGMENT;
// Master and server partitions share the partitions segment.
case MasterPartition:
case ServerPartition:
return Paths.PARTITIONS_PATH_SEGMENT;
case PartitionKeyRange:
return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
case Media:
return Paths.MEDIA_ROOT;
case Schema:
return Paths.SCHEMAS_PATH_SEGMENT;
// Account-level resources are addressed at the root.
case DatabaseAccount:
case Topology:
return Paths.ROOT;
default:
String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
throw new BadRequestException(errorMessage);
}
}
// Validates that a full name alternates the expected type segments for the
// given resource type, e.g. "dbs/{db}/colls/{coll}" for a collection.
public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
    String[] expectedSegments = getResourcePathArray(resourceType);
    if (expectedSegments == null) {
        return false;
    }
    String[] actualSegments = StringUtils.split(resourceFullName, '/');
    // Every expected type segment must be followed by exactly one name segment.
    if (actualSegments.length != expectedSegments.length * 2) {
        return false;
    }
    for (int i = 0; i < expectedSegments.length; i++) {
        if (!expectedSegments[i].equals(actualSegments[2 * i])) {
            return false;
        }
    }
    return true;
}
/**
 * Builds the canonical sequence of type path segments for the given resource
 * type (e.g. [dbs, colls, docs] for Document), or null when the type has no
 * database-rooted name-based path.
 */
private static String[] getResourcePathArray(ResourceType resourceType) {
    List<String> segments = new ArrayList<String>();
    segments.add(Paths.DATABASES_PATH_SEGMENT);
    if (resourceType == ResourceType.Permission ||
            resourceType == ResourceType.User) {
        segments.add(Paths.USERS_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission) {
            segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
        }
    } else if (resourceType == ResourceType.ClientEncryptionKey) {
        segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    } else if (resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.StoredProcedure ||
            resourceType == ResourceType.UserDefinedFunction ||
            resourceType == ResourceType.Trigger ||
            resourceType == ResourceType.Conflict ||
            resourceType == ResourceType.Attachment ||
            resourceType == ResourceType.Document ||
            resourceType == ResourceType.PartitionKeyRange ||
            resourceType == ResourceType.Schema) {
        segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
        if (resourceType == ResourceType.StoredProcedure) {
            segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Trigger) {
            segments.add(Paths.TRIGGERS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Conflict) {
            segments.add(Paths.CONFLICTS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Schema) {
            segments.add(Paths.SCHEMAS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Document ||
                resourceType == ResourceType.Attachment) {
            segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
            if (resourceType == ResourceType.Attachment) {
                segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
            }
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
        }
        // NOTE: a former "resourceType == ResourceType.PartitionKey" branch here was
        // unreachable (PartitionKey is not part of the enclosing condition) and has
        // been removed; behavior is unchanged.
    } else if (resourceType != ResourceType.Database) {
        // Database alone needs only the "dbs" segment; anything else is unsupported.
        return null;
    }
    return segments.stream().toArray(String[]::new);
}
/**
 * Validates that a resource id string parses to a rid whose component for the
 * given resource type is non-zero.
 *
 * @return true when the id is structurally valid for the type; false otherwise,
 *         including for types whose validation is not implemented (logged as an error)
 */
public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
    if (resourceType == ResourceType.Conflict) {
        return PathsHelper.validateConflictId(resourceId);
    } else if (resourceType == ResourceType.Database) {
        return PathsHelper.validateDatabaseId(resourceId);
    } else if (resourceType == ResourceType.DocumentCollection) {
        return PathsHelper.validateDocumentCollectionId(resourceId);
    } else if (resourceType == ResourceType.Document) {
        return PathsHelper.validateDocumentId(resourceId);
    } else if (resourceType == ResourceType.Permission) {
        return PathsHelper.validatePermissionId(resourceId);
    } else if (resourceType == ResourceType.StoredProcedure) {
        return PathsHelper.validateStoredProcedureId(resourceId);
    } else if (resourceType == ResourceType.Trigger) {
        return PathsHelper.validateTriggerId(resourceId);
    } else if (resourceType == ResourceType.UserDefinedFunction) {
        return PathsHelper.validateUserDefinedFunctionId(resourceId);
    } else if (resourceType == ResourceType.User) {
        return PathsHelper.validateUserId(resourceId);
    } else if (resourceType == ResourceType.Attachment) {
        return PathsHelper.validateAttachmentId(resourceId);
    } else if (resourceType == ResourceType.ClientEncryptionKey) {
        return PathsHelper.validateClientEncryptionKeyId(resourceId);
    } else {
        // Parameterized SLF4J logging instead of eagerly formatting the message.
        logger.error("ValidateResourceId not implemented for Type {} in ResourceRequestHandler", resourceType);
        return false;
    }
}
/** Returns true when the string parses as a rid with a non-zero database component. */
public static boolean validateDatabaseId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDatabase() != 0;
}
/** Returns true when the string parses as a rid with a non-zero collection component. */
public static boolean validateDocumentCollectionId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDocumentCollection() != 0;
}
/** Returns true when the string parses as a rid with a non-zero document component. */
public static boolean validateDocumentId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDocument() != 0;
}
/** Returns true when the string parses as a rid with a non-zero conflict component. */
public static boolean validateConflictId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getConflict() != 0;
}
/** Returns true when the string parses as a rid with a non-zero attachment component. */
public static boolean validateAttachmentId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getAttachment() != 0;
}
/** Returns true when the string parses as a rid with a non-zero permission component. */
public static boolean validatePermissionId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getPermission() != 0;
}
/** Returns true when the string parses as a rid with a non-zero stored-procedure component. */
public static boolean validateStoredProcedureId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getStoredProcedure() != 0;
}
/** Returns true when the string parses as a rid with a non-zero trigger component. */
public static boolean validateTriggerId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getTrigger() != 0;
}
/** Returns true when the string parses as a rid with a non-zero UDF component. */
public static boolean validateUserDefinedFunctionId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getUserDefinedFunction() != 0;
}
/** Returns true when the string parses as a rid with a non-zero user component. */
public static boolean validateUserId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getUser() != 0;
}
/** Returns true when the string parses as a rid with a non-zero client-encryption-key component. */
public static boolean validateClientEncryptionKeyId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getClientEncryptionKey() != 0;
}
/**
 * Returns true when the given resource instance is one of the listed
 * externally addressable resource types; false for anything else.
 */
public static boolean isPublicResource(Resource resourceType) {
    // Return the condition directly instead of if/return true/return false.
    return resourceType instanceof Database ||
        resourceType instanceof DocumentCollection ||
        resourceType instanceof StoredProcedure ||
        resourceType instanceof UserDefinedFunction ||
        resourceType instanceof Trigger ||
        resourceType instanceof Conflict ||
        resourceType instanceof User ||
        resourceType instanceof Permission ||
        resourceType instanceof Document ||
        resourceType instanceof Offer;
}
} | class PathsHelper {
private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);
/**
 * Generates the wire path for the given request, dispatching on whether the
 * request addresses its resource by name or by resource id.
 */
public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
    return request.getIsNameBased()
        ? generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType())
        : generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
}
/**
 * Builds the name-based path for a resource instance. Database and Offer paths
 * do not use the owner's full name; all other types are prefixed with it.
 * Branch order matters: the trailing "instanceof Resource" catch-all returns
 * null for any resource subtype not listed above it.
 *
 * NOTE(review): a null {@code resourceType} skips every instanceof branch and
 * would NPE on the final toString() — presumably callers never pass null; verify.
 *
 * @return the name-based path, or null when resourceName is null, the owner is
 *         required but null, or the subtype is not handled
 */
public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
    if (resourceName == null)
        return null;
    if (resourceType instanceof Database) {
        return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceOwnerFullName == null) {
        return null;
    } else if (resourceType instanceof DocumentCollection) {
        return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof StoredProcedure) {
        return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof UserDefinedFunction) {
        return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Trigger) {
        return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Conflict) {
        return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof User) {
        return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Permission) {
        return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Document) {
        return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Offer) {
        // Offers are addressed at the root, not under an owner.
        return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
    } else if (resourceType instanceof Resource) {
        // Unhandled concrete subtype: no name-based path.
        return null;
    }
    String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
    assert false : errorMessage;
    throw new IllegalArgumentException(errorMessage);
}
/**
 * Generates the wire path for a resource addressed by owner or resource id.
 * PartitionKey paths are only defined for the delete-by-partition-key operation,
 * so that operation type is supplied explicitly; every other type passes null.
 */
public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) {
    OperationType operationType = (resourceType == ResourceType.PartitionKey) ? OperationType.Delete : null;
    return generatePath(resourceType, ownerOrResourceId, isFeed, operationType);
}
/**
 * Builds the rid-based wire path for a resource. For feed requests the path
 * ends at the collection-level segment; for single-resource requests it ends
 * with the resource's own id. Except for root-level types (Database, Offer,
 * partitions, DatabaseAccount, Topology), {@code ownerOrResourceId} is parsed
 * as a {@link ResourceId} and its components are used to rebuild the ancestor
 * chain (dbs/{db}/colls/{coll}/...).
 *
 * @param resourceType the type of the addressed resource
 * @param ownerOrResourceId rid of the resource (or of its owner, for feeds);
 *        may be null/empty only for root-level feed types
 * @param isFeed true for collection-level (feed) paths
 * @param operationType only consulted for PartitionKey (delete operation)
 * @return the wire path
 * @throws IllegalStateException for invalid type/feed/id combinations
 */
private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
    // Feed requests need an owner id unless the type is rooted at the service level.
    if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) &&
        resourceType != ResourceType.Database &&
        resourceType != ResourceType.Offer &&
        resourceType != ResourceType.MasterPartition &&
        resourceType != ResourceType.ServerPartition &&
        resourceType != ResourceType.DatabaseAccount &&
        resourceType != ResourceType.Topology) {
        throw new IllegalStateException("INVALID resource type");
    }
    if(ownerOrResourceId == null) {
        ownerOrResourceId = StringUtils.EMPTY;
    }
    if (isFeed && resourceType == ResourceType.Database) {
        return Paths.DATABASES_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Database) {
        return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
    } else if (isFeed && resourceType == ResourceType.DocumentCollection) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.DocumentCollection) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
    } else if (isFeed && resourceType == ResourceType.Offer) {
        return Paths.OFFERS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Offer) {
        return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
    } else if (isFeed && resourceType == ResourceType.StoredProcedure) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.STORED_PROCEDURES_PATH_SEGMENT;
    } else if (resourceType == ResourceType.StoredProcedure) {
        ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" +
            Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
    } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.UserDefinedFunction) {
        ResourceId functionId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" +
            Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
    } else if (isFeed && resourceType == ResourceType.Trigger) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.TRIGGERS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Trigger) {
        ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" +
            Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
    } else if (isFeed && resourceType == ResourceType.Conflict) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.CONFLICTS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Conflict) {
        ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" +
            Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
    } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" +
            documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
    } else if (resourceType == ResourceType.PartitionKeyRange) {
        ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" +
            Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
    } else if (isFeed && resourceType == ResourceType.Attachment) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" +
            Paths.ATTACHMENTS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Attachment) {
        ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" +
            Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" +
            Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
    } else if (isFeed && resourceType == ResourceType.User) {
        // User feeds take the owning database id verbatim (no ResourceId parse).
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" +
            Paths.USERS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.User) {
        ResourceId userId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
            Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
    } else if (isFeed && resourceType == ResourceType.Permission) {
        ResourceId userId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" +
            Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" +
            Paths.PERMISSIONS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Permission) {
        ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" +
            Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" +
            Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
    } else if (isFeed && resourceType == ResourceType.Document) {
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return
            Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.DOCUMENTS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Document) {
        ResourceId documentId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" +
            Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
    } else if (isFeed && resourceType == ResourceType.MasterPartition) {
        return Paths.PARTITIONS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.MasterPartition) {
        return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
    } else if (isFeed && resourceType == ResourceType.ServerPartition) {
        return Paths.PARTITIONS_PATH_SEGMENT;
    } else if (resourceType == ResourceType.ServerPartition) {
        return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
    } else if (isFeed && resourceType == ResourceType.Topology) {
        return Paths.TOPOLOGY_PATH_SEGMENT;
    } else if (resourceType == ResourceType.Topology) {
        return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
    } else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
        return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
    } else if (resourceType == ResourceType.DatabaseAccount) {
        return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
    } else if (resourceType == ResourceType.ClientEncryptionKey) {
        ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" +
            Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
    } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        // Delete-by-partition-key is expressed as an operation under the collection.
        ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
        return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" +
            Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
            Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
    }
    String errorMessage = "invalid resource type";
    throw new IllegalStateException(errorMessage);
}
/**
 * Parses a resource URL into a {@link PathInfo}, or returns null when the URL
 * does not correspond to a known resource path.
 */
public static PathInfo parsePathSegments(String resourceUrl) {
    String[] parts = StringUtils.strip(resourceUrl, "/").split("/");
    if (parts == null || parts.length < 1) {
        return null;
    }
    int count = parts.length;
    String last = StringUtils.strip(parts[count - 1], "/");
    String secondLast = (count >= 2) ? StringUtils.strip(parts[count - 2], "/") : StringUtils.EMPTY;
    if (count >= 2
            && Paths.MEDIA_PATH_SEGMENT.compareTo(parts[0]) != 0
            && Paths.OFFERS_PATH_SEGMENT.compareTo(parts[0]) != 0
            && Paths.PARTITIONS_PATH_SEGMENT.compareTo(parts[0]) != 0
            && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(parts[0]) != 0) {
        // A second segment that is not a parseable database rid means the link is name based.
        Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(parts[1]);
        if (!parsed.getLeft() || !parsed.getRight().isDatabaseId()) {
            return parseNameSegments(resourceUrl, parts);
        }
    }
    if ((count % 2 != 0) && isResourceType(last)) {
        // Odd segment count ending in a resource type => feed (collection-level) path.
        String ownerId = (last.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0) ? secondLast : StringUtils.EMPTY;
        return new PathInfo(true, last, ownerId, false);
    }
    if (isResourceType(secondLast)) {
        return new PathInfo(false, secondLast, last, false);
    }
    return null;
}
/**
 * Parses a resource URL into its resource path, resource id / full name, and
 * feed flag, filling the supplied {@link PathInfo}.
 *
 * @param resourceUrl complete resource link to parse
 * @param pathInfo out-parameter receiving the parsed components
 * @param clientVersion the client version (no longer consulted; retained for
 *        interface compatibility)
 * @return true when the URL could be parsed into a known resource path, false otherwise
 */
public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
    pathInfo.resourcePath = StringUtils.EMPTY;
    pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
    pathInfo.isFeed = false;
    pathInfo.isNameBased = false;
    if (StringUtils.isEmpty(resourceUrl)) {
        return false;
    }
    String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
    String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
    if (segments == null || segments.length < 1) {
        return false;
    }
    int uriSegmentsCount = segments.length;
    String segmentOne = segments[uriSegmentsCount - 1];
    String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
    if (uriSegmentsCount >= 2) {
        // A second segment that is not a parseable database rid means the link is name based.
        if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
            Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
            if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                pathInfo.isNameBased = true;
                return tryParseNameSegments(resourceUrl, segments, pathInfo);
            }
        }
    }
    if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
        // Odd segment count ending in a resource type => feed (collection-level) path.
        pathInfo.isFeed = true;
        pathInfo.resourcePath = segmentOne;
        if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
            pathInfo.resourceIdOrFullName = segmentTwo;
        }
    } else if (PathsHelper.isResourceType(segmentTwo)) {
        pathInfo.isFeed = false;
        pathInfo.resourcePath = segmentTwo;
        pathInfo.resourceIdOrFullName = segmentOne;
        // NOTE: a dead media-path branch that only declared unused locals
        // (attachmentId / storeIndex) was removed here; behavior is unchanged.
    } else {
        return false;
    }
    return true;
}
/**
 * Attempts to extract the name-based resource path and full name from the URL
 * segments, filling the supplied {@link PathInfo}.
 *
 * @param resourceUrl complete resource link
 * @param segments the URL split into path segments
 * @param pathInfo out-parameter receiving the parsed components
 * @return true when the segments describe a name-based resource or feed, false otherwise
 */
private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
    pathInfo.isFeed = false;
    pathInfo.resourceIdOrFullName = "";
    pathInfo.resourcePath = "";
    if (segments == null || segments.length < 1) {
        return false;
    }
    boolean evenCount = (segments.length % 2 == 0);
    if (evenCount && isResourceType(segments[segments.length - 2])) {
        // Even count => a single resource; its full name is the whole link.
        pathInfo.resourcePath = segments[segments.length - 2];
        pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
        return true;
    }
    if (!evenCount && isResourceType(segments[segments.length - 1])) {
        // Odd count => a feed; the owner's full name is the link minus its last segment.
        pathInfo.isFeed = true;
        pathInfo.resourcePath = segments[segments.length - 1];
        String ownerFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
        pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(ownerFullName);
        return true;
    }
    return false;
}
/**
 * Parses name-based URL segments into a {@link PathInfo}, or returns null when
 * the segments do not describe a known resource type.
 */
public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
    if (segments == null || segments.length < 1) {
        return null;
    }
    int count = segments.length;
    if (count % 2 == 0 && isResourceType(segments[count - 2])) {
        // Even count => a single resource; its full name is the whole link.
        return new PathInfo(false, segments[count - 2], unescapeJavaAndTrim(resourceUrl), true);
    }
    if (count % 2 != 0 && isResourceType(segments[count - 1])) {
        // Odd count => a feed; the owner's full name is the link minus its last segment.
        String ownerFullName = resourceUrl.substring(0,
            StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
        return new PathInfo(true, segments[count - 1], unescapeJavaAndTrim(ownerFullName), true);
    }
    return null;
}
/**
 * Strips leading and trailing path-separator characters from the URL; if the
 * remaining text contains an escape character, the whole stripped string is
 * Java-unescaped instead. Returns null for null input and "" when the input
 * consists only of separators.
 */
public static String unescapeJavaAndTrim(String resourceUrl) {
    if (resourceUrl == null) {
        return null;
    }
    // Advance past leading separator characters.
    int startInclusiveIndex = 0;
    while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
        startInclusiveIndex++;
    }
    if (startInclusiveIndex == resourceUrl.length()) {
        // Entire string was separators.
        return "";
    }
    // Back up over trailing separator characters.
    int endExclusiveIndex = resourceUrl.length();
    while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
        endExclusiveIndex--;
    }
    // Escapes present anywhere in the stripped range: unescape the stripped string.
    for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
        if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) {
            return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
        }
    }
    if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
        // Nothing to strip; return the original without allocating a substring.
        return resourceUrl;
    }
    return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
}
/**
 * Returns true when the given path segment names a known resource collection
 * (e.g. "dbs", "colls"). The comparison is case-insensitive via a
 * locale-independent lower-casing ({@code Locale.ROOT}).
 */
private static boolean isResourceType(String resourcePathSegment) {
    if (StringUtils.isEmpty(resourcePathSegment)) {
        return false;
    }
    switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
        case Paths.ATTACHMENTS_PATH_SEGMENT:
        case Paths.COLLECTIONS_PATH_SEGMENT:
        case Paths.DATABASES_PATH_SEGMENT:
        case Paths.PERMISSIONS_PATH_SEGMENT:
        case Paths.USERS_PATH_SEGMENT:
        case Paths.DOCUMENTS_PATH_SEGMENT:
        case Paths.STORED_PROCEDURES_PATH_SEGMENT:
        case Paths.TRIGGERS_PATH_SEGMENT:
        case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
        case Paths.CONFLICTS_PATH_SEGMENT:
        case Paths.MEDIA_PATH_SEGMENT:
        case Paths.OFFERS_PATH_SEGMENT:
        case Paths.PARTITIONS_PATH_SEGMENT:
        case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
        case Paths.TOPOLOGY_PATH_SEGMENT:
        case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
        case Paths.SCHEMAS_PATH_SEGMENT:
        case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
            return true;
        default:
            return false;
    }
}
/**
 * Builds the name-based path for a resource of the given type under its
 * owner's full name (e.g. {owner}/docs/{name}).
 *
 * NOTE(review): the Offer case prefixes the owner's full name, whereas the
 * Resource-instance overload of this method emits a bare "offers/{name}" path
 * — presumably intentional for this call site, but worth confirming.
 *
 * @return the name-based path, or null for resource types without one
 */
public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
    switch (resourceType) {
        case Database:
            // Databases are root-level; the owner's full name is not used.
            return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
        case DocumentCollection:
            return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
        case StoredProcedure:
            return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
        case UserDefinedFunction:
            return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
        case Trigger:
            return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
        case Attachment:
            return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName;
        case Conflict:
            return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
        case Document:
            return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
        case Offer:
            return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
        case Permission:
            return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
        case User:
            return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
        case PartitionKeyRange:
            return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName;
        default:
            return null;
    }
}
/**
 * Truncates a resource full name to its collection path (dbs/{db}/colls/{coll}).
 * Returns the input unchanged when it is null or already at or above
 * collection depth.
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    int cut = indexOfNth(trimmed, '/', 4);
    return (cut > 0) ? trimmed.substring(0, cut) : resourceFullName;
}
/**
 * Truncates a resource full name to its database path (dbs/{db}).
 * Returns the input unchanged when it is null or already at database depth.
 */
public static String getDatabasePath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    int cut = indexOfNth(resourceFullName, '/', 2);
    return (cut > 0) ? resourceFullName.substring(0, cut) : resourceFullName;
}
/**
 * Returns the ancestor path formed by the first {@code segmentIndex} segments
 * of the full name. When the name has no such separator but does reach depth
 * {@code segmentIndex - 1}, the name itself is returned; otherwise null.
 */
public static String getParentByIndex(String resourceFullName, int segmentIndex) {
    int cut = indexOfNth(resourceFullName, '/', segmentIndex);
    if (cut > 0) {
        return resourceFullName.substring(0, cut);
    }
    // No nth separator: the name is at or below the requested depth.
    return (indexOfNth(resourceFullName, '/', segmentIndex - 1) > 0) ? resourceFullName : null;
}
/**
 * Returns true when the given string looks like a name-based resource link
 * rather than a resource id. Heuristic used here: a '/' at index 3 with total
 * length greater than 4 (e.g. "dbs/{name}/...").
 */
public static boolean isNameBased(String resourceIdOrFullName) {
    // length() > 4 already implies non-empty, so the former isEmpty() check
    // was redundant; return the condition directly.
    return resourceIdOrFullName != null
        && resourceIdOrFullName.length() > 4
        && resourceIdOrFullName.charAt(3) == '/';
}
/**
 * Returns the index of the {@code nthOccurance}-th occurrence of {@code value}
 * in {@code str}, or -1 when there are fewer occurrences (including for
 * non-positive {@code nthOccurance}).
 */
private static int indexOfNth(String str, char value, int nthOccurance) {
    int seen = 0;
    for (int i = 0; i < str.length(); i++) {
        if (str.charAt(i) == value && ++seen == nthOccurance) {
            return i;
        }
    }
    return -1;
}
/**
 * Maps a URL path segment (exact, case-sensitive match) back to its
 * {@link ResourceType}.
 *
 * @param resourcePathSegment the path segment, e.g. "dbs" or "colls"
 * @return the corresponding resource type
 * @throws BadRequestException when the segment is empty or unknown
 */
public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
    if (StringUtils.isEmpty(resourcePathSegment)) {
        String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
        throw new BadRequestException(message);
    }
    switch (resourcePathSegment) {
        case Paths.ATTACHMENTS_PATH_SEGMENT:
            return ResourceType.Attachment;
        case Paths.COLLECTIONS_PATH_SEGMENT:
            return ResourceType.DocumentCollection;
        case Paths.DATABASES_PATH_SEGMENT:
            return ResourceType.Database;
        case Paths.PERMISSIONS_PATH_SEGMENT:
            return ResourceType.Permission;
        case Paths.USERS_PATH_SEGMENT:
            return ResourceType.User;
        case Paths.DOCUMENTS_PATH_SEGMENT:
            return ResourceType.Document;
        case Paths.STORED_PROCEDURES_PATH_SEGMENT:
            return ResourceType.StoredProcedure;
        case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
            return ResourceType.UserDefinedFunction;
        case Paths.TRIGGERS_PATH_SEGMENT:
            return ResourceType.Trigger;
        case Paths.CONFLICTS_PATH_SEGMENT:
            return ResourceType.Conflict;
        case Paths.OFFERS_PATH_SEGMENT:
            return ResourceType.Offer;
        case Paths.SCHEMAS_PATH_SEGMENT:
            return ResourceType.Schema;
    }
    String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
    throw new BadRequestException(errorMessage);
}
/**
 * Maps a {@link ResourceType} to its URL path segment constant
 * (e.g. Database maps to the "dbs" segment).
 *
 * @param resourceType the resource type to map
 * @return the path segment constant for the type
 * @throws BadRequestException when the type has no known path segment
 */
public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
    switch (resourceType) {
        case Database:
            return Paths.DATABASES_PATH_SEGMENT;
        case DocumentCollection:
            return Paths.COLLECTIONS_PATH_SEGMENT;
        case Document:
            return Paths.DOCUMENTS_PATH_SEGMENT;
        case StoredProcedure:
            return Paths.STORED_PROCEDURES_PATH_SEGMENT;
        case UserDefinedFunction:
            return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        case Trigger:
            return Paths.TRIGGERS_PATH_SEGMENT;
        case Conflict:
            return Paths.CONFLICTS_PATH_SEGMENT;
        case Attachment:
            return Paths.ATTACHMENTS_PATH_SEGMENT;
        case User:
            return Paths.USERS_PATH_SEGMENT;
        case Permission:
            return Paths.PERMISSIONS_PATH_SEGMENT;
        case Offer:
            return Paths.OFFERS_PATH_SEGMENT;
        case MasterPartition:
        case ServerPartition:
            // Both partition flavors share the same segment.
            return Paths.PARTITIONS_PATH_SEGMENT;
        case PartitionKeyRange:
            return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        case Media:
            return Paths.MEDIA_ROOT;
        case Schema:
            return Paths.SCHEMAS_PATH_SEGMENT;
        case DatabaseAccount:
        case Topology:
            // Account-level resources live at the service root.
            return Paths.ROOT;
        default:
            String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
            throw new BadRequestException(errorMessage);
    }
}
/**
 * Checks that a resource full name's segments match exactly the canonical path
 * layout for the given resource type (alternating type-segment / name pairs).
 */
public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
    String[] expectedPath = getResourcePathArray(resourceType);
    if (expectedPath == null) {
        return false;
    }
    String[] actualSegments = StringUtils.split(resourceFullName, '/');
    if (actualSegments.length != expectedPath.length * 2) {
        return false;
    }
    for (int i = 0; i < expectedPath.length; i++) {
        // Type segments occupy every even position of the full name.
        if (!expectedPath[i].equals(actualSegments[2 * i])) {
            return false;
        }
    }
    return true;
}
/**
 * Builds the expected keyword layout ("dbs", "colls", ...) for a resource type's
 * full name, or null when the type has no path-based layout.
 */
private static String[] getResourcePathArray(ResourceType resourceType) {
    List<String> segments = new ArrayList<String>();
    // Every addressable resource lives under a database.
    segments.add(Paths.DATABASES_PATH_SEGMENT);
    if (resourceType == ResourceType.Permission ||
        resourceType == ResourceType.User) {
        segments.add(Paths.USERS_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission) {
            segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
        }
    } else if (resourceType == ResourceType.ClientEncryptionKey) {
        segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    } else if (resourceType == ResourceType.DocumentCollection ||
        resourceType == ResourceType.StoredProcedure ||
        resourceType == ResourceType.UserDefinedFunction ||
        resourceType == ResourceType.Trigger ||
        resourceType == ResourceType.Conflict ||
        resourceType == ResourceType.Attachment ||
        resourceType == ResourceType.Document ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.Schema) {
        segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
        if (resourceType == ResourceType.StoredProcedure) {
            segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
        } else if(resourceType == ResourceType.UserDefinedFunction) {
            segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        } else if(resourceType == ResourceType.Trigger) {
            segments.add(Paths.TRIGGERS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Conflict) {
            segments.add(Paths.CONFLICTS_PATH_SEGMENT);
        } else if (resourceType == ResourceType.Schema) {
            segments.add(Paths.SCHEMAS_PATH_SEGMENT);
        } else if(resourceType == ResourceType.Document ||
            resourceType == ResourceType.Attachment) {
            segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
            if (resourceType == ResourceType.Attachment) {
                segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
            }
        } else if(resourceType == ResourceType.PartitionKeyRange) {
            segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
        } else if (resourceType == ResourceType.PartitionKey) {
            // NOTE(review): this branch looks unreachable — ResourceType.PartitionKey is not
            // in the enclosing else-if's condition list, so control never reaches here.
            // Confirm intent against the original file.
            segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
            segments.add(Paths.OPERATIONS_PATH_SEGMENT);
        }
    } else if (resourceType != ResourceType.Database) {
        // Unknown layout for this type.
        return null;
    }
    return segments.stream().toArray(String[]::new);
}
/**
 * Validates that a resource id string parses to an id with a non-zero
 * component for the expected resource type.
 *
 * @param resourceType the expected resource type.
 * @param resourceId the rid string to validate.
 * @return true when the rid parses and its component for the type is non-zero;
 *         false (with an error log) for types without a validator.
 */
public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
    switch (resourceType) {
        case Conflict:
            return PathsHelper.validateConflictId(resourceId);
        case Database:
            return PathsHelper.validateDatabaseId(resourceId);
        case DocumentCollection:
            return PathsHelper.validateDocumentCollectionId(resourceId);
        case Document:
            return PathsHelper.validateDocumentId(resourceId);
        case Permission:
            return PathsHelper.validatePermissionId(resourceId);
        case StoredProcedure:
            return PathsHelper.validateStoredProcedureId(resourceId);
        case Trigger:
            return PathsHelper.validateTriggerId(resourceId);
        case UserDefinedFunction:
            return PathsHelper.validateUserDefinedFunctionId(resourceId);
        case User:
            return PathsHelper.validateUserId(resourceId);
        case Attachment:
            return PathsHelper.validateAttachmentId(resourceId);
        case ClientEncryptionKey:
            return PathsHelper.validateClientEncryptionKeyId(resourceId);
        default:
            logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
            return false;
    }
}
/** @return true when the rid parses and its database component is non-zero. */
public static boolean validateDatabaseId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDatabase() != 0;
}

/** @return true when the rid parses and its collection component is non-zero. */
public static boolean validateDocumentCollectionId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDocumentCollection() != 0;
}

/** @return true when the rid parses and its document component is non-zero. */
public static boolean validateDocumentId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getDocument() != 0;
}

/** @return true when the rid parses and its conflict component is non-zero. */
public static boolean validateConflictId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getConflict() != 0;
}

/** @return true when the rid parses and its attachment component is non-zero. */
public static boolean validateAttachmentId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getAttachment() != 0;
}

/** @return true when the rid parses and its permission component is non-zero. */
public static boolean validatePermissionId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getPermission() != 0;
}

/** @return true when the rid parses and its stored-procedure component is non-zero. */
public static boolean validateStoredProcedureId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getStoredProcedure() != 0;
}

/** @return true when the rid parses and its trigger component is non-zero. */
public static boolean validateTriggerId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getTrigger() != 0;
}

/** @return true when the rid parses and its UDF component is non-zero. */
public static boolean validateUserDefinedFunctionId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getUserDefinedFunction() != 0;
}

/** @return true when the rid parses and its user component is non-zero. */
public static boolean validateUserId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getUser() != 0;
}

/** @return true when the rid parses and its client-encryption-key component is non-zero. */
public static boolean validateClientEncryptionKeyId(String resourceIdString) {
    final Pair<Boolean, ResourceId> parsed = ResourceId.tryParse(resourceIdString);
    if (!parsed.getLeft()) {
        return false;
    }
    return parsed.getRight().getClientEncryptionKey() != 0;
}
/**
 * Determines whether the given resource instance is a public (user-visible)
 * resource kind.
 *
 * @param resourceType the resource instance to classify (parameter name kept for
 *        API compatibility, although it is a {@link Resource} instance, not a type enum).
 * @return true for Database, DocumentCollection, StoredProcedure, UserDefinedFunction,
 *         Trigger, Conflict, User, Permission, Document and Offer instances; false otherwise.
 */
public static boolean isPublicResource(Resource resourceType) {
    // Return the predicate directly instead of the redundant `if (cond) return true; else return false;`.
    return resourceType instanceof Database
        || resourceType instanceof DocumentCollection
        || resourceType instanceof StoredProcedure
        || resourceType instanceof UserDefinedFunction
        || resourceType instanceof Trigger
        || resourceType instanceof Conflict
        || resourceType instanceof User
        || resourceType instanceof Permission
        || resourceType instanceof Document
        || resourceType instanceof Offer;
}
} |
Any specific reason we have not switched the thread in this scenario ? | private Mono<StoreResponse> invokeStoreWithThroughputControlAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
return this.throughputControlStore.processRequest(
request,
Mono.defer(() -> this.invokeStoreInternalAsync(physicalAddress, request)));
} | return this.throughputControlStore.processRequest( | private Mono<StoreResponse> invokeStoreWithThroughputControlAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
return this.throughputControlStore.processRequest(
request,
Mono.defer(() -> this.invokeStoreInternalAsync(physicalAddress, request)));
} | class TransportClient implements AutoCloseable {
// True when responses should be published on a dedicated scheduler instead of the
// transport (I/O) thread; read once from configuration at construction time.
private final boolean switchOffIOThreadForResponse;
// Optional throughput-control hook; null until enableThroughputControl is called.
private ThroughputControlStore throughputControlStore;

public TransportClient() {
    this.switchOffIOThreadForResponse = Configs.shouldSwitchOffIOThreadForResponse();
}

// Installs the store that throttles requests; enables the throughput-control path
// in invokeResourceOperationAsync below.
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
    this.throughputControlStore = throughputControlStore;
}

/**
 * Entry point for invoking a store operation against a replica address.
 * Records the physical address on the request context (if not already set),
 * then routes through throughput control when enabled, otherwise straight
 * to the store.
 */
public Mono<StoreResponse> invokeResourceOperationAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
    if (StringUtils.isEmpty(request.requestContext.resourcePhysicalAddress)) {
        request.requestContext.resourcePhysicalAddress = physicalAddress.toString();
    }
    if (this.throughputControlStore != null) {
        return this.invokeStoreWithThroughputControlAsync(physicalAddress, request);
    }
    return this.invokeStoreInternalAsync(physicalAddress, request);
}

// Transport-specific implementation supplied by subclasses.
// NOTE(review): an abstract method requires the enclosing class to be declared
// abstract — confirm against the full class declaration (outside this span).
protected abstract Mono<StoreResponse> invokeStoreAsync(
    Uri physicalAddress,
    RxDocumentServiceRequest request);

private Mono<StoreResponse> invokeStoreInternalAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
    if (switchOffIOThreadForResponse) {
        // Hop off the transport I/O thread so downstream work cannot block the event loop.
        return this.invokeStoreAsync(physicalAddress, request).publishOn(CosmosSchedulers.TRANSPORT_RESPONSE_BOUNDED_ELASTIC);
    }
    return this.invokeStoreAsync(physicalAddress, request);
}
} | class TransportClient implements AutoCloseable {
private final boolean switchOffIOThreadForResponse = Configs.shouldSwitchOffIOThreadForResponse();
private ThroughputControlStore throughputControlStore;
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
public Mono<StoreResponse> invokeResourceOperationAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (StringUtils.isEmpty(request.requestContext.resourcePhysicalAddress)) {
request.requestContext.resourcePhysicalAddress = physicalAddress.toString();
}
if (this.throughputControlStore != null) {
return this.invokeStoreWithThroughputControlAsync(physicalAddress, request);
}
return this.invokeStoreInternalAsync(physicalAddress, request);
}
protected abstract Mono<StoreResponse> invokeStoreAsync(
Uri physicalAddress,
RxDocumentServiceRequest request);
private Mono<StoreResponse> invokeStoreInternalAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (switchOffIOThreadForResponse) {
return this.invokeStoreAsync(physicalAddress, request).publishOn(CosmosSchedulers.TRANSPORT_RESPONSE_BOUNDED_ELASTIC);
}
return this.invokeStoreAsync(physicalAddress, request);
}
} |
it is calling invokeStoreInternal, the switch thread logic is in invokeStoreInternal method | private Mono<StoreResponse> invokeStoreWithThroughputControlAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
return this.throughputControlStore.processRequest(
request,
Mono.defer(() -> this.invokeStoreInternalAsync(physicalAddress, request)));
} | return this.throughputControlStore.processRequest( | private Mono<StoreResponse> invokeStoreWithThroughputControlAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
return this.throughputControlStore.processRequest(
request,
Mono.defer(() -> this.invokeStoreInternalAsync(physicalAddress, request)));
} | class TransportClient implements AutoCloseable {
private final boolean switchOffIOThreadForResponse;
private ThroughputControlStore throughputControlStore;
public TransportClient() {
this.switchOffIOThreadForResponse = Configs.shouldSwitchOffIOThreadForResponse();
}
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
public Mono<StoreResponse> invokeResourceOperationAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (StringUtils.isEmpty(request.requestContext.resourcePhysicalAddress)) {
request.requestContext.resourcePhysicalAddress = physicalAddress.toString();
}
if (this.throughputControlStore != null) {
return this.invokeStoreWithThroughputControlAsync(physicalAddress, request);
}
return this.invokeStoreInternalAsync(physicalAddress, request);
}
protected abstract Mono<StoreResponse> invokeStoreAsync(
Uri physicalAddress,
RxDocumentServiceRequest request);
private Mono<StoreResponse> invokeStoreInternalAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (switchOffIOThreadForResponse) {
return this.invokeStoreAsync(physicalAddress, request).publishOn(CosmosSchedulers.TRANSPORT_RESPONSE_BOUNDED_ELASTIC);
}
return this.invokeStoreAsync(physicalAddress, request);
}
} | class TransportClient implements AutoCloseable {
private final boolean switchOffIOThreadForResponse = Configs.shouldSwitchOffIOThreadForResponse();
private ThroughputControlStore throughputControlStore;
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
public Mono<StoreResponse> invokeResourceOperationAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (StringUtils.isEmpty(request.requestContext.resourcePhysicalAddress)) {
request.requestContext.resourcePhysicalAddress = physicalAddress.toString();
}
if (this.throughputControlStore != null) {
return this.invokeStoreWithThroughputControlAsync(physicalAddress, request);
}
return this.invokeStoreInternalAsync(physicalAddress, request);
}
protected abstract Mono<StoreResponse> invokeStoreAsync(
Uri physicalAddress,
RxDocumentServiceRequest request);
private Mono<StoreResponse> invokeStoreInternalAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (switchOffIOThreadForResponse) {
return this.invokeStoreAsync(physicalAddress, request).publishOn(CosmosSchedulers.TRANSPORT_RESPONSE_BOUNDED_ELASTIC);
}
return this.invokeStoreAsync(physicalAddress, request);
}
} |
nevermind it is going in common method | private Mono<StoreResponse> invokeStoreWithThroughputControlAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
return this.throughputControlStore.processRequest(
request,
Mono.defer(() -> this.invokeStoreInternalAsync(physicalAddress, request)));
} | return this.throughputControlStore.processRequest( | private Mono<StoreResponse> invokeStoreWithThroughputControlAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
return this.throughputControlStore.processRequest(
request,
Mono.defer(() -> this.invokeStoreInternalAsync(physicalAddress, request)));
} | class TransportClient implements AutoCloseable {
private final boolean switchOffIOThreadForResponse;
private ThroughputControlStore throughputControlStore;
public TransportClient() {
this.switchOffIOThreadForResponse = Configs.shouldSwitchOffIOThreadForResponse();
}
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
public Mono<StoreResponse> invokeResourceOperationAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (StringUtils.isEmpty(request.requestContext.resourcePhysicalAddress)) {
request.requestContext.resourcePhysicalAddress = physicalAddress.toString();
}
if (this.throughputControlStore != null) {
return this.invokeStoreWithThroughputControlAsync(physicalAddress, request);
}
return this.invokeStoreInternalAsync(physicalAddress, request);
}
protected abstract Mono<StoreResponse> invokeStoreAsync(
Uri physicalAddress,
RxDocumentServiceRequest request);
private Mono<StoreResponse> invokeStoreInternalAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (switchOffIOThreadForResponse) {
return this.invokeStoreAsync(physicalAddress, request).publishOn(CosmosSchedulers.TRANSPORT_RESPONSE_BOUNDED_ELASTIC);
}
return this.invokeStoreAsync(physicalAddress, request);
}
} | class TransportClient implements AutoCloseable {
private final boolean switchOffIOThreadForResponse = Configs.shouldSwitchOffIOThreadForResponse();
private ThroughputControlStore throughputControlStore;
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
public Mono<StoreResponse> invokeResourceOperationAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (StringUtils.isEmpty(request.requestContext.resourcePhysicalAddress)) {
request.requestContext.resourcePhysicalAddress = physicalAddress.toString();
}
if (this.throughputControlStore != null) {
return this.invokeStoreWithThroughputControlAsync(physicalAddress, request);
}
return this.invokeStoreInternalAsync(physicalAddress, request);
}
protected abstract Mono<StoreResponse> invokeStoreAsync(
Uri physicalAddress,
RxDocumentServiceRequest request);
private Mono<StoreResponse> invokeStoreInternalAsync(Uri physicalAddress, RxDocumentServiceRequest request) {
if (switchOffIOThreadForResponse) {
return this.invokeStoreAsync(physicalAddress, request).publishOn(CosmosSchedulers.TRANSPORT_RESPONSE_BOUNDED_ELASTIC);
}
return this.invokeStoreAsync(physicalAddress, request);
}
} |
Can you please elaborate on this logic , why we needed it first place, and how it will fix our caching issue moving from UUID to incremental counter | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter; | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | class AbstractQueryGenerator {
/** Protected constructor: this class is meant to be extended, not instantiated directly. */
protected AbstractQueryGenerator() {
}
/**
 * Renders a unary condition (no bound value), e.g. "IS_NULL(r.name)" for
 * function-style operators or "r.name IS_DEFINED"-style keyword suffixes.
 */
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final String column = criteria.getSubject();
    final String keyword = criteria.getType().getSqlKeyword();
    return CriteriaType.isFunction(criteria.getType())
        ? String.format("%s(r.%s)", keyword, column)
        : String.format("r.%s %s", column, keyword);
}
/**
 * Renders a binary condition with exactly one bound value, registering the
 * parameter binding as a side effect on {@code parameters}.
 */
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Binary criteria should have only one subject value");
    Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
    final String column = criteria.getSubject();
    final Object boundValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
    // Counter-suffixed name keeps repeated columns distinct within one query.
    final String paramName = generateQueryParameter(column, counter);
    final Part.IgnoreCaseType caseMode = criteria.getIgnoreCase();
    final String keyword = criteria.getType().getSqlKeyword();
    parameters.add(Pair.of(paramName, boundValue));
    return CriteriaType.isFunction(criteria.getType())
        ? getFunctionCondition(caseMode, keyword, column, paramName)
        : getCondition(caseMode, keyword, column, paramName);
}
/**
 * Get condition string without a SQL function (plain operator form, e.g. "r.name = @name").
 * NOTE(review): the original javadoc summary said "with function", which described the
 * sibling {@code getFunctionCondition} instead — the two summaries appear swapped.
 *
 * @param ignoreCase ignore case flag; anything other than NEVER wraps both sides in UPPER()
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                            final String subject, final String parameter) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
    } else {
        return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
    }
}
/**
 * Get condition string using a SQL function form, e.g. "CONTAINS(r.name, @name)".
 * NOTE(review): the original javadoc summary said "without function", which described
 * the sibling {@code getCondition} instead — the two summaries appear swapped.
 *
 * @param ignoreCase ignore case flag; anything other than NEVER wraps both arguments in UPPER()
 * @param sqlKeyword sql key word, operation name
 * @param subject sql column name
 * @param parameter sql filter value
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
                                    final String subject, final String parameter) {
    if (Part.IgnoreCaseType.NEVER == ignoreCase) {
        return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
    } else {
        return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
    }
}
/**
 * Renders a BETWEEN condition, binding lower and upper bounds under distinct
 * parameter names derived from the column name plus "start"/"end".
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String column = criteria.getSubject();
    final Object lowerBound = toCosmosDbValue(criteria.getSubjectValues().get(0));
    final Object upperBound = toCosmosDbValue(criteria.getSubjectValues().get(1));
    final String lowerParam = generateQueryParameter(column + "start", counter);
    final String upperParam = generateQueryParameter(column + "end", counter);
    parameters.add(Pair.of(lowerParam, lowerBound));
    parameters.add(Pair.of(upperParam, upperBound));
    return String.format("(r.%s %s @%s AND @%s)", column, criteria.getType().getSqlKeyword(), lowerParam, upperParam);
}
// Joins two rendered sub-conditions with a closure keyword (AND / OR).
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    Assert.isTrue(CriteriaType.isClosed(type) && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation");
    final String keyword = type.getSqlKeyword();
    return left + " " + keyword + " " + right;
}
// Renders an IN / NOT IN condition, expanding the collection value into
// positional parameters @p0, @p1, ... based on bindings collected so far.
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> paras = new ArrayList<>();
    for (Object o : values) {
        // NOTE(review): Boolean is accepted here although the error message below only
        // mentions Number and String — confirm which is intended.
        if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
            String key = "p" + parameters.size();
            paras.add("@" + key);
            parameters.add(Pair.of(key, o));
        } else {
            throw new IllegalQueryException("IN keyword Range only support Number and String type.");
        }
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", paras));
}
// Recursive dispatcher: renders one criteria node (and its sub-criteria) into
// SQL text, collecting parameter bindings and consuming the shared counter so
// that repeated column names get unique parameter names.
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
    final CriteriaType type = criteria.getType();
    switch (type) {
        case ALL:
            // No filter at all — empty WHERE body.
            return "";
        case IN:
        case NOT_IN:
            return generateInQuery(criteria, parameters);
        case BETWEEN:
            return generateBetween(criteria, parameters, counter.getAndIncrement());
        case IS_NULL:
        case IS_NOT_NULL:
        case FALSE:
        case TRUE:
            return generateUnaryQuery(criteria);
        case IS_EQUAL:
        case NOT:
        case BEFORE:
        case AFTER:
        case LESS_THAN:
        case LESS_THAN_EQUAL:
        case GREATER_THAN:
        case GREATER_THAN_EQUAL:
        case CONTAINING:
        case ENDS_WITH:
        case STARTS_WITH:
        case ARRAY_CONTAINS:
            return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
        case AND:
        case OR:
            // Closure nodes recurse into both children, sharing the same counter
            // so parameter names stay unique across the whole tree.
            Assert.isTrue(criteria.getSubCriteria().size() == 2,
                "criteria should have two SubCriteria");
            final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
            final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
            return generateClosedQuery(left, right, type);
        default:
            throw new UnsupportedOperationException("unsupported Criteria type: "
                + type);
    }
}
/**
 * Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
 * parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
 *
 * @param query the representation for query method.
 * @return A pair tuple compose of Sql query.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
    final List<Pair<String, Object>> bindings = new ArrayList<>();
    final String condition = this.generateQueryBody(query.getCriteria(), bindings, counter);
    // Prefix with WHERE only when there is an actual condition to attach.
    final String whereClause = StringUtils.hasText(condition) ? "WHERE " + condition : condition;
    return Pair.of(whereClause, bindings);
}
// Renders one ORDER BY term, e.g. "r.name DESC".
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    return "r." + order.getProperty() + (order.isDescending() ? " DESC" : " ASC");
}
// Renders the ORDER BY clause, or "" for unsorted queries.
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String orderings = sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(","));
    return "ORDER BY " + orderings;
}
// Builds everything after the WHERE clause; currently just the optional ORDER BY.
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final String sortClause = generateQuerySort(query.getSort());
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Assembles the final {@link SqlQuerySpec} from the caller-supplied query head,
 * the generated WHERE clause, the ORDER BY tail and an optional OFFSET/LIMIT suffix.
 *
 * @param query the parsed repository query.
 * @param queryHead the leading SELECT fragment supplied by the caller.
 * @return the executable query spec with its bound parameters.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
                                           @NonNull String queryHead) {
    // A fresh counter per generation keeps parameter names deterministic, so the
    // same logical query always yields the same query text (cache friendly).
    final AtomicInteger counter = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
    String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
    final List<SqlParameter> sqlParameters = queryBody.getSecond().stream()
        .map(p -> new SqlParameter("@" + p.getFirst(),
            toCosmosDbValue(p.getSecond())))
        .collect(Collectors.toList());
    if (query.getLimit() > 0) {
        // Plain concatenation; the explicit StringBuilder added no value here.
        queryString += " OFFSET 0 LIMIT " + query.getLimit();
    }
    return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
protected AbstractQueryGenerator() {
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.join(" ", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    // The tail currently consists only of the optional ORDER BY clause. The previous
    // implementation built a single-element ArrayList and ran it through a stream
    // filter/join pipeline — needless indirection for one value; a direct hasText
    // check produces the identical result ("" when the sort clause is blank).
    final String sortClause = generateQuerySort(query.getSort());
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Assembles the final Cosmos SqlQuerySpec from the caller-supplied query head (e.g. a
 * SELECT projection), the generated WHERE body and the ORDER BY tail, binding every
 * collected parameter as an "@name" SQL parameter.
 *
 * @param query the representation for the query method.
 * @param queryHead the leading part of the SQL statement supplied by the caller.
 * @return the executable query spec with its bound parameters.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
// A fresh counter per invocation keeps generated parameter names deterministic: the same
// logical query always yields the same query text, while duplicate field names inside a
// single query still get distinct parameter names.
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
// Prefix each collected name with '@' to match the placeholders embedded in the query text.
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
// A positive limit is expressed as "OFFSET 0 LIMIT n" in Cosmos SQL.
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
@simplynaveen20 In Spring, you can have duplicate parameters of the same name in a query - for example this query - `Iterable<Project> findByNameAndCreatorOrNameAndCreator(String name, String creator, String name2, String creator2);` This would get converted to this - `select * from c where c.name = @name and c.creator = @creator OR c.name = @name and c.creator = @creator` This causes problems when running the query - the Gateway throws an exception saying 400 bad request. To avoid this issue, when generating the query params, we were appending a random UUID to produce different query param names for the same fields. However, that resulted in a different query text for the same query if it was executed twice in the same JVM. To solve the query text issue, I am now using a counter - which gets initialized when generating a query text. That way, conceptually identical queries will now have the same query text, which is what we want. | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter; | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | class AbstractQueryGenerator {
// Intentionally empty: this class only provides shared query-generation helpers
// for concrete generator subclasses.
protected AbstractQueryGenerator() {
}
/** Renders a value-less (unary) criteria, e.g. null-checks and boolean tests, as SQL. */
private String generateUnaryQuery(@NonNull Criteria criteria) {
    Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
    Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
    final String fieldName = criteria.getSubject();
    final String keyword = criteria.getType().getSqlKeyword();
    // Function-style keywords wrap the field, KEYWORD(r.field); plain unary keywords
    // trail the field, r.field KEYWORD.
    return CriteriaType.isFunction(criteria.getType())
        ? String.format("%s(r.%s)", keyword, fieldName)
        : String.format("r.%s %s", fieldName, keyword);
}
/**
 * Renders a single-value (binary) criteria as SQL, registering its bound value in
 * {@code parameters} under a sanitized, counter-suffixed parameter name.
 */
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Binary criteria should have only one subject value");
    Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
    final String fieldName = criteria.getSubject();
    final String parameterName = generateQueryParameter(fieldName, counter);
    parameters.add(Pair.of(parameterName, toCosmosDbValue(criteria.getSubjectValues().get(0))));
    final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
    final String keyword = criteria.getType().getSqlKeyword();
    // Function keywords render as KEYWORD(r.field, @param); comparison keywords as
    // r.field KEYWORD @param.
    return CriteriaType.isFunction(criteria.getType())
        ? getFunctionCondition(ignoreCase, keyword, fieldName, parameterName)
        : getCondition(ignoreCase, keyword, fieldName, parameterName);
}
/**
 * Get condition string without a SQL function, i.e. "r.subject KEYWORD @parameter".
 * When ignore-case applies, both sides are wrapped in UPPER() for a case-insensitive match.
 * (The previous summary said "with function", which actually described getFunctionCondition —
 * the two Javadoc summaries were swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string with a SQL function, i.e. "KEYWORD(r.subject, @parameter)".
 * When ignore-case applies, both arguments are wrapped in UPPER() for a case-insensitive
 * match. (The previous summary said "without function" — the two Javadoc summaries were
 * swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql function keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
/**
 * Renders a BETWEEN criteria as "(r.field KEYWORD @start AND @end)", binding the two
 * range values under distinct "<field>start"/"<field>end" parameter names that share
 * the same counter suffix.
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String fieldName = criteria.getSubject();
    final String lowerParam = generateQueryParameter(fieldName + "start", counter);
    final String upperParam = generateQueryParameter(fieldName + "end", counter);
    // Register the range bounds in start/end order, mirroring their order in the SQL text.
    parameters.add(Pair.of(lowerParam, toCosmosDbValue(criteria.getSubjectValues().get(0))));
    parameters.add(Pair.of(upperParam, toCosmosDbValue(criteria.getSubjectValues().get(1))));
    return String.format("(r.%s %s @%s AND @%s)",
        fieldName, criteria.getType().getSqlKeyword(), lowerParam, upperParam);
}
/** Joins two already-rendered sub-conditions with a connector keyword (AND / OR). */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    // Only closed, binary criteria types may connect two sub-conditions.
    Assert.isTrue(CriteriaType.isClosed(type) && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation");
    return left + " " + type.getSqlKeyword() + " " + right;
}
/**
 * Renders an IN / NOT IN condition, e.g. "r.field IN (@p0,@p1)". The single subject
 * value must be a Collection; each element is bound as its own parameter named
 * "p" + the current parameter count, which keeps names unique across multiple IN
 * clauses in one query.
 *
 * NOTE(review): only String, Integer, Long and Boolean elements are accepted even
 * though the error message mentions "Number" generally (e.g. Double is rejected) —
 * confirm this restriction is intentional.
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
// Name derives from the running size of the shared parameter list.
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
/**
 * Recursively renders a Criteria tree into a SQL condition string, appending every bound
 * value to {@code parameters}. The shared {@code counter} is consumed (getAndIncrement)
 * once per value-carrying leaf so duplicate field names receive distinct parameter names
 * while the generated query text stays deterministic.
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// No filter at all: an empty body means no WHERE clause is emitted by the caller.
return "";
case IN:
case NOT_IN:
// IN-style clauses name their parameters from the parameter-list size, not the counter.
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
// Unary criteria carry no bound value, so the counter is not consumed.
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
// Connectors recurse into exactly two sub-criteria that share the same counter.
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generates the WHERE-clause body for the given query together with its bound parameters.
 * The result pairs the SQL filter text (prefixed with "WHERE" when non-empty) with the
 * list of (parameter name, value) pairs referenced by that text.
 *
 * @param query the representation for the query method.
 * @param counter shared counter used to suffix generated parameter names so duplicate
 *                field names in one query stay unique while keeping the query text stable.
 * @return a Pair of the SQL filter string (possibly empty) and its parameter name/value pairs.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
// Only prepend WHERE when there is an actual filter; a CriteriaType.ALL tree yields "".
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
/** Renders one Sort.Order as an ORDER BY term of the form "r.&lt;property&gt; ASC|DESC". */
private static String getParameter(@NonNull Sort.Order order) {
    // Case-insensitive ordering is not supported by this generator.
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    if (order.isDescending()) {
        return "r." + order.getProperty() + " DESC";
    }
    return "r." + order.getProperty() + " ASC";
}
/** Builds the ORDER BY clause for the given sort, or an empty string when unsorted. */
static String generateQuerySort(@NonNull Sort sort) {
    // An unsorted request contributes nothing to the query tail.
    if (sort.isUnsorted()) {
        return "";
    }
    // Render each order as "r.<property> ASC|DESC" and join them after the ORDER BY keyword.
    return sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(",", "ORDER BY ", ""));
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    // The tail currently consists only of the optional ORDER BY clause. The previous
    // implementation built a single-element ArrayList and ran it through a stream
    // filter/join pipeline — needless indirection for one value; a direct hasText
    // check produces the identical result ("" when the sort clause is blank).
    final String sortClause = generateQuerySort(query.getSort());
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Assembles the final Cosmos SqlQuerySpec from the caller-supplied query head (e.g. a
 * SELECT projection), the generated WHERE body and the ORDER BY tail, binding every
 * collected parameter as an "@name" SQL parameter.
 *
 * @param query the representation for the query method.
 * @param queryHead the leading part of the SQL statement supplied by the caller.
 * @return the executable query spec with its bound parameters.
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
// A fresh counter per invocation keeps generated parameter names deterministic: the same
// logical query always yields the same query text, while duplicate field names inside a
// single query still get distinct parameter names.
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
// Prefix each collected name with '@' to match the placeholders embedded in the query text.
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
// A positive limit is expressed as "OFFSET 0 LIMIT n" in Cosmos SQL.
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
protected AbstractQueryGenerator() {
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
}
/**
 * Get condition string without a SQL function, i.e. "r.subject KEYWORD @parameter".
 * When ignore-case applies, both sides are wrapped in UPPER() for a case-insensitive match.
 * (The previous summary said "with function", which actually described getFunctionCondition —
 * the two Javadoc summaries were swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string with a SQL function, i.e. "KEYWORD(r.subject, @parameter)".
 * When ignore-case applies, both arguments are wrapped in UPPER() for a case-insensitive
 * match. (The previous summary said "without function" — the two Javadoc summaries were
 * swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql function keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.join(" ", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} |
This was the issue that we fixed earlier for duplicate query params in the same query - https://github.com/Azure/azure-sdk-for-java/issues/17961 - and this was the fix that we made last time - https://github.com/Azure/azure-sdk-for-java/pull/17962 That's why it is important to have a counter. However, the counter is not random, so the same query text is produced every time the same query is executed. The counter is only added to differentiate between duplicate query param names. | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter; | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | class AbstractQueryGenerator {
protected AbstractQueryGenerator() {
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
}
/**
 * Get condition string without a SQL function, i.e. "r.subject KEYWORD @parameter".
 * When ignore-case applies, both sides are wrapped in UPPER() for a case-insensitive match.
 * (The previous summary said "with function", which actually described getFunctionCondition —
 * the two Javadoc summaries were swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string with a SQL function, i.e. "KEYWORD(r.subject, @parameter)".
 * When ignore-case applies, both arguments are wrapped in UPPER() for a case-insensitive
 * match. (The previous summary said "without function" — the two Javadoc summaries were
 * swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql function keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.join(" ", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
protected AbstractQueryGenerator() {
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
}
/**
 * Get condition string without a SQL function, i.e. "r.subject KEYWORD @parameter".
 * When ignore-case applies, both sides are wrapped in UPPER() for a case-insensitive match.
 * (The previous summary said "with function", which actually described getFunctionCondition —
 * the two Javadoc summaries were swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
 * Get condition string with a SQL function, i.e. "KEYWORD(r.subject, @parameter)".
 * When ignore-case applies, both arguments are wrapped in UPPER() for a case-insensitive
 * match. (The previous summary said "without function" — the two Javadoc summaries were
 * swapped.)
 *
 * @param ignoreCase ignore case flag
 * @param sqlKeyword sql function keyword, operation name
 * @param subject sql column name
 * @param parameter sql parameter name bound to the filter value
 * @return condition string
 */
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.join(" ", left, type.getSqlKeyword(), right);
}
/**
 * Generates an IN / NOT IN condition, e.g. {@code r.name IN (@p0,@p1)}, registering each
 * element of the collection as its own query parameter.
 *
 * @param criteria criteria whose single subject value must be a {@link Collection}
 * @param parameters output list that receives the generated (name, value) pairs
 * @return the IN condition string
 * @throws IllegalQueryException if the subject value is not a Collection, or if it contains
 *     an element of a type other than String, Integer, Long or Boolean
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> paras = new ArrayList<>();
    for (Object o : values) {
        if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
            // Parameter names are derived from the running parameter count to stay unique.
            String key = "p" + parameters.size();
            paras.add("@" + key);
            parameters.add(Pair.of(key, o));
        } else {
            // Message fixed: the accepted types are exactly those checked above
            // (the old text claimed "Number and String" but Boolean is accepted too).
            throw new IllegalQueryException(
                "IN keyword only supports String, Integer, Long and Boolean types in the parameter collection.");
        }
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", paras));
}
/**
 * Recursively generates the query fragment for the given criteria tree, appending any
 * generated parameters to {@code parameters}.
 *
 * @param criteria the (possibly composite) criteria to translate
 * @param parameters output list that receives generated (name, value) parameter pairs
 * @param counter running counter used to keep generated parameter names unique
 * @return the query fragment, or an empty string for the ALL criteria
 * @throws UnsupportedOperationException if the criteria type is not supported
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
    final CriteriaType type = criteria.getType();
    switch (type) {
        case ALL:
            // ALL matches everything: contributes no WHERE fragment at all.
            return "";
        case IN:
        case NOT_IN:
            // IN allocates one parameter per collection element; counter is not consumed here.
            return generateInQuery(criteria, parameters);
        case BETWEEN:
            return generateBetween(criteria, parameters, counter.getAndIncrement());
        case IS_NULL:
        case IS_NOT_NULL:
        case FALSE:
        case TRUE:
            // Unary operations need no parameter placeholder.
            return generateUnaryQuery(criteria);
        case IS_EQUAL:
        case NOT:
        case BEFORE:
        case AFTER:
        case LESS_THAN:
        case LESS_THAN_EQUAL:
        case GREATER_THAN:
        case GREATER_THAN_EQUAL:
        case CONTAINING:
        case ENDS_WITH:
        case STARTS_WITH:
        case ARRAY_CONTAINS:
            // All binary operations share one code path; each consumes one counter value.
            return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
        case AND:
        case OR:
            // Composite criteria: translate both sides, then join with the closing operator.
            Assert.isTrue(criteria.getSubCriteria().size() == 2,
                "criteria should have two SubCriteria");
            final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
            final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
            return generateClosedQuery(left, right, type);
        default:
            throw new UnsupportedOperationException("unsupported Criteria type: "
                + type);
    }
}
/**
 * Generate a query body for interface QuerySpecGenerator. The query body is composed of the
 * SQL query String and its parameters. The parameters are organized as a list of Pairs, where
 * each pair is composed of the parameter name and its value.
 *
 * @param query the representation for query method.
 * @param counter running counter used to keep generated parameter names unique.
 * @return A pair composed of the SQL query body (prefixed with WHERE when non-empty) and its
 *     parameters.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
    final List<Pair<String, Object>> parameters = new ArrayList<>();
    String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
    // A blank body (the ALL criteria) must not produce a dangling WHERE keyword.
    if (StringUtils.hasText(queryString)) {
        queryString = String.join(" ", "WHERE", queryString);
    }
    return Pair.of(queryString, parameters);
}
/**
 * Translates a Spring Data sort order into an ORDER BY expression, e.g. {@code r.name DESC}.
 *
 * @param order the sort order; ignore-case ordering is not supported
 * @return the ORDER BY expression for this order
 */
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    return String.format("r.%s %s", order.getProperty(), order.isDescending() ? "DESC" : "ASC");
}
/**
 * Generates the ORDER BY clause for the given sort, or an empty string when unsorted.
 *
 * @param sort the sort specification
 * @return the ORDER BY clause, or {@code ""} if the sort is unsorted
 */
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String orderExpressions = sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(","));
    return "ORDER BY " + orderExpressions;
}
/**
 * Generates the trailing part of the query (currently only the ORDER BY clause).
 *
 * @param query the query representation
 * @return the query tail, or an empty string when there is nothing to append
 */
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final String sortClause = generateQuerySort(query.getSort());
    // Only a non-blank clause contributes; an unsorted query yields an empty tail.
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Generates the Cosmos DB SQL query spec for the given query representation.
 * <p>
 * The final query is composed of the supplied head (e.g. {@code SELECT * FROM r}), the
 * generated WHERE body and the ORDER BY tail, plus an OFFSET/LIMIT clause when the query
 * defines a positive limit.
 *
 * @param query the query representation
 * @param queryHead the leading part of the SQL statement
 * @return the SQL query spec containing the query text and its bound parameters
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
    @NonNull String queryHead) {
    final AtomicInteger counter = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
    String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
    final List<SqlParameter> sqlParameters = queryBody.getSecond().stream()
        .map(p -> new SqlParameter("@" + p.getFirst(), toCosmosDbValue(p.getSecond())))
        .collect(Collectors.toList());
    if (query.getLimit() > 0) {
        // Plain concatenation replaces the previous single-use StringBuilder (same result,
        // clearer intent).
        queryString = queryString + " OFFSET 0 LIMIT " + query.getLimit();
    }
    return new SqlQuerySpec(queryString, sqlParameters);
}
} |
size is removed here. It doesn't tell us anything useful. | private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = unconfirmedSends.remove(correlationId);
if (sink == null) {
logger.warning(
"connectionId[{}] linkName[{}] messageId[{}] Received delivery without pending message.",
connectionId, linkName, id);
return;
}
sink.success(message);
} | logger.warning( | private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = unconfirmedSends.remove(correlationId);
if (sink == null) {
logger.warning(
"connectionId[{}] linkName[{}] messageId[{}] Received delivery without pending message.",
connectionId, linkName, id);
return;
}
sink.success(message);
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*
* @throws RuntimeException if the send/receive links could not be locally scheduled to open.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(sendLink, sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(receiveLink, receiveLinkHandler);
this.subscriptions = Disposables.composite(
receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}] messageId[{}]: Settling message.", connectionId,
linkName, message.getCorrelationId());
settleMessage(message);
}),
receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException | RejectedExecutionException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s] linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, retryOptions, activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}] messageId[{}]: Scheduling on dispatcher. ",
connectionId, linkName, messageId);
unconfirmedSends.putIfAbsent(messageId, sink);
provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
receiveLink.flow(1);
sendLink.send(bytes, 0, encodedSize);
delivery.settle();
sendLink.advance();
});
} catch (IOException | RejectedExecutionException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return receiveLinkHandler.getErrorContext(receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void handleError(Throwable error, String message) {
if (hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
}
private void onTerminalState(String handlerName) {
if (pendingLinkTerminations.get() <= 0) {
logger.verbose("connectionId[{}] linkName[{}] Already disposed send/receive links.");
return;
}
final int remaining = pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}] {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
connectionId, linkName, this.sendLinkState, this.receiveLinkState);
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
private void terminateUnconfirmedSends(Throwable error) {
logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
connectionId, linkName, unconfirmedSends.size(), error.getMessage());
Map.Entry<UnsignedLong, MonoSink<Message>> next;
int count = 0;
while ((next = unconfirmedSends.pollFirstEntry()) != null) {
next.getValue().error(error);
count++;
}
logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
connectionId, linkName, count, error.getMessage());
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*
* @throws RuntimeException if the send/receive links could not be locally scheduled to open.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(sendLink, sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(receiveLink, receiveLinkHandler);
this.subscriptions = Disposables.composite(
receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}] messageId[{}]: Settling message.", connectionId,
linkName, message.getCorrelationId());
settleMessage(message);
}),
receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException | RejectedExecutionException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s] linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, retryOptions, activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}] messageId[{}]: Scheduling on dispatcher. ",
connectionId, linkName, messageId);
unconfirmedSends.putIfAbsent(messageId, sink);
provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
receiveLink.flow(1);
sendLink.send(bytes, 0, encodedSize);
delivery.settle();
sendLink.advance();
});
} catch (IOException | RejectedExecutionException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return receiveLinkHandler.getErrorContext(receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void handleError(Throwable error, String message) {
if (hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
}
private void onTerminalState(String handlerName) {
if (pendingLinkTerminations.get() <= 0) {
logger.verbose("connectionId[{}] linkName[{}] Already disposed send/receive links.");
return;
}
final int remaining = pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}] {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
connectionId, linkName, this.sendLinkState, this.receiveLinkState);
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
/**
 * Fails every pending (unacknowledged) send with the given error, draining the
 * {@code unconfirmedSends} map as it goes.
 *
 * @param error The error to deliver to each waiting sender.
 */
private void terminateUnconfirmedSends(Throwable error) {
    logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
        connectionId, linkName, unconfirmedSends.size(), error.getMessage());
    int terminated = 0;
    // Drain entries one at a time so concurrent additions are also picked up.
    for (Map.Entry<UnsignedLong, MonoSink<Message>> entry = unconfirmedSends.pollFirstEntry();
            entry != null;
            entry = unconfirmedSends.pollFirstEntry()) {
        entry.getValue().error(error);
        terminated++;
    }
    logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
        connectionId, linkName, terminated, error.getMessage());
}
} |
Thoughts on switching over to using a logger here given that CI has logging configurations? Also, add a `%n` to the `printf` so it is new-lined. | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms", testName, duration);
}
} | System.out.printf("%s is starting", testName); | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting%n", testName);
LOGGER.info("{} is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms%n", testName, duration);
LOGGER.info("{} finished and took {} ms", testName, duration);
}
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} |
Do we want to keep the `catch` and log the testing error? | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms", testName, duration);
}
} | } finally { | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting%n", testName);
LOGGER.info("{} is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms%n", testName, duration);
LOGGER.info("{} finished and took {} ms", testName, duration);
}
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} |
Same question on using a log here instead. Also, add a `%n` to the `printf` so it is new-lined. | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms", testName, duration);
}
} | long duration = System.currentTimeMillis() - startTimestamp; | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting%n", testName);
LOGGER.info("{} is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms%n", testName, duration);
LOGGER.info("{} finished and took {} ms", testName, duration);
}
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} |
I'm going to add a logger but keep printf — printf output displays on the DevOps job page, which is nice, but having entries in the log file is going to help analyze it without cross-referencing the DevOps output. | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms", testName, duration);
}
} | System.out.printf("%s is starting", testName); | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting%n", testName);
LOGGER.info("{} is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms%n", testName, duration);
LOGGER.info("{} finished and took {} ms", testName, duration);
}
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} |
I think we might have disturbed the test framework by intercepting this in such a way. If it's exception then test framework should dump it, if it's Error then JVM should dump it into output when it bubbles. | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms", testName, duration);
}
} | } finally { | public void intercept(IMethodInvocation invocation) throws Throwable {
String testName = TestNameProvider.getTestName(invocation.getIteration());
System.out.printf("%s is starting%n", testName);
LOGGER.info("{} is starting", testName);
long startTimestamp = System.currentTimeMillis();
try {
invocation.proceed();
} finally {
long duration = System.currentTimeMillis() - startTimestamp;
System.out.printf("%s finished and took %d ms%n", testName, duration);
LOGGER.info("{} finished and took {} ms", testName, duration);
}
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} | class TestHeaderIterationInterceptor implements IMethodInterceptor {
@Override
} |
```suggestion // and if not dispose it (aka drain it). ``` | public static void closeConnection(Connection reactorNettyConnection) {
if (reactorNettyConnection instanceof ChannelOperations) {
ChannelOperations<?, ?> channelOperations = (ChannelOperations<?, ?>) reactorNettyConnection;
if (!channelOperations.isInboundDisposed()) {
channelOperations.channel().eventLoop().execute(channelOperations::discard);
}
} else if (!reactorNettyConnection.isDisposed()) {
reactorNettyConnection.channel().eventLoop().execute(reactorNettyConnection::dispose);
}
} | public static void closeConnection(Connection reactorNettyConnection) {
if (reactorNettyConnection instanceof ChannelOperations) {
ChannelOperations<?, ?> channelOperations = (ChannelOperations<?, ?>) reactorNettyConnection;
if (!channelOperations.isInboundDisposed()) {
channelOperations.channel().eventLoop().execute(channelOperations::discard);
}
} else if (!reactorNettyConnection.isDisposed()) {
reactorNettyConnection.channel().eventLoop().execute(reactorNettyConnection::dispose);
}
} | class Utility {
/**
* Deep copies the passed {@link ByteBuf} into a {@link ByteBuffer}.
* <p>
* Using this method ensures that data returned by the network is resilient against Reactor Netty releasing the
* passed {@link ByteBuf} once the {@code doOnNext} operator fires.
*
* @param byteBuf The Netty {@link ByteBuf} to deep copy.
* @return A newly allocated {@link ByteBuffer} containing the copied bytes.
*/
/**
 * Deep copies the readable region of the given Netty {@code ByteBuf} into a freshly allocated
 * heap {@code ByteBuffer}, so the data survives Netty releasing the source buffer.
 *
 * @param byteBuf Source buffer; its reader index is advanced by the copy.
 * @return A new {@code ByteBuffer} positioned at zero containing the copied bytes.
 */
public static ByteBuffer deepCopyBuffer(ByteBuf byteBuf) {
    final int readable = byteBuf.readableBytes();
    final ByteBuffer copy = ByteBuffer.allocate(readable);
    byteBuf.readBytes(copy);
    copy.rewind();
    return copy;
}
/**
* Closes a connection if it hasn't been disposed.
*
* @param reactorNettyConnection The connection to close.
*/
private Utility() {
}
} | class Utility {
/**
* Deep copies the passed {@link ByteBuf} into a {@link ByteBuffer}.
* <p>
* Using this method ensures that data returned by the network is resilient against Reactor Netty releasing the
* passed {@link ByteBuf} once the {@code doOnNext} operator fires.
*
* @param byteBuf The Netty {@link ByteBuf} to deep copy.
* @return A newly allocated {@link ByteBuffer} containing the copied bytes.
*/
public static ByteBuffer deepCopyBuffer(ByteBuf byteBuf) {
ByteBuffer buffer = ByteBuffer.allocate(byteBuf.readableBytes());
byteBuf.readBytes(buffer);
buffer.rewind();
return buffer;
}
/**
* Closes a connection if it hasn't been disposed.
*
* @param reactorNettyConnection The connection to close.
*/
private Utility() {
}
} | |
Spring has an API to do this; we should leverage that instead of dealing with these streams ourselves (I understand this is easy, but still): `org.springframework.boot.autoconfigure.info.ProjectInfoAutoConfiguration#loadSource` | private static String getVersion() {
String version = "unknown";
Properties properties = new Properties();
try (InputStream inputStream =
new FileInputStream(ResourceUtils.getFile("classpath:azure-spring-boot-version.txt"))) {
properties.load(inputStream);
version = properties.getProperty("version");
} catch (IOException e) {
LOGGER.warn("Can not get version.");
}
return version;
} | try (InputStream inputStream = | private static String getVersion() {
String version = "unknown";
try {
Properties properties = PropertiesLoaderUtils.loadProperties(
new ClassPathResource("project.properties"));
version = properties.getProperty("version");
} catch (IOException e) {
LOGGER.warn("Can not get version.");
}
return version;
} | class ApplicationId {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationId.class);
public static final String VERSION = getVersion();
public static final String AZURE_SPRING_KEY_VAULT = "az-sp-kv/" + VERSION;
public static final String AZURE_SPRING_SERVICE_BUS = "az-sp-bus/" + VERSION;
public static final String AZURE_SPRING_STORAGE_BLOB = "az-sp-sb/" + VERSION;
public static final String AZURE_SPRING_STORAGE_FILES = "az-sp-sf/" + VERSION;
/**
* AZURE_SPRING_AAD does not contain VERSION, because AAD server support 2 headers: 1. x-client-SKU; 2.
* x-client-VER;
*/
public static final String AZURE_SPRING_AAD = "az-sp-aad";
public static final String AZURE_SPRING_B2C = "az-sp-b2c";
} | class ApplicationId {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationId.class);
public static final String VERSION = getVersion();
public static final String AZURE_SPRING_KEY_VAULT = "az-sp-kv/" + VERSION;
public static final String AZURE_SPRING_SERVICE_BUS = "az-sp-bus/" + VERSION;
public static final String AZURE_SPRING_STORAGE_BLOB = "az-sp-sb/" + VERSION;
public static final String AZURE_SPRING_STORAGE_FILES = "az-sp-sf/" + VERSION;
/**
* AZURE_SPRING_AAD does not contain VERSION, because AAD server support 2 headers: 1. x-client-SKU; 2.
* x-client-VER;
*/
public static final String AZURE_SPRING_AAD = "az-sp-aad";
public static final String AZURE_SPRING_B2C = "az-sp-b2c";
} |
Does this mean you cannot "reset" the value of a header that's already there? Is there a `remove()` method? | public HttpHeaders add(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
return this;
} else {
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
}
return this;
} | } else { | public HttpHeaders add(String name, String value) {
String caseInsensitiveName = formatKey(name);
if (caseInsensitiveName == null || value == null) {
return this;
}
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
return this;
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
 * Creates an empty HttpHeaders instance, backed by a (non-thread-safe) {@code HashMap}.
 */
public HttpHeaders() {
headers = new HashMap<>();
}
/**
 * Creates a HttpHeaders instance seeded with the provided headers.
 * Each entry is routed through {@code set(String, String)}, so entries with a
 * null value are dropped rather than stored.
 *
 * @param headers the map of initial header name/value pairs
 */
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
 * Creates a HttpHeaders instance seeded with the provided headers.
 *
 * NOTE(review): each header is copied via {@code header.getValue()} — for a multi-valued
 * header this copies only what getValue() returns; confirm multi-valued headers are not
 * collapsed by this constructor.
 *
 * @param headers the collection of initial headers
 */
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
 * Creates an empty HttpHeaders instance whose backing map is pre-sized to
 * {@code initialCapacity}, avoiding rehashes when the expected header count is known.
 *
 * @param initialCapacity the initial capacity of the backing headers map.
 */
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
 * Gets the number of distinct (case-insensitive) header names in the collection.
 *
 * @return the number of headers in this collection.
 */
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
 * Sets a {@link HttpHeader header} with the given name and value.
 *
 * <p>If a header with the same name already exists its value is overwritten.</p>
 *
 * @param name the header name
 * @param value the header value
 * @return The updated HttpHeaders object
 * @deprecated Use {@link #set(String, String)} instead; this method delegates to it directly.
 */
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
 * Sets a {@link HttpHeader header} with the given name and value. An existing header with the
 * same (case-insensitive) name is overwritten. A null value removes the header instead.
 *
 * @param name the header name; if null, this method returns with no changes to the headers.
 * @param value the header value; if null, the header with the given name is removed.
 * @return The updated HttpHeaders object
 */
public HttpHeaders set(String name, String value) {
    if (name == null) {
        return this;
    }
    final String caseInsensitiveName = formatKey(name);
    if (value != null) {
        headers.put(caseInsensitiveName, new HttpHeader(name, value));
    } else {
        remove(caseInsensitiveName);
    }
    return this;
}
/**
 * Sets a {@link HttpHeader header} with the given name and the list of values provided, such
 * that the given values will be comma-separated when necessary. An existing header with the
 * same (case-insensitive) name is overwritten. A null or empty values list removes the header.
 *
 * @param name the header name; if null, this method returns with no changes to the headers.
 * @param values the values that will be comma-separated as appropriate; null or empty removes
 * the header.
 * @return The updated HttpHeaders object
 */
public HttpHeaders set(String name, List<String> values) {
    if (name == null) {
        return this;
    }
    String caseInsensitiveName = formatKey(name);
    // Treat an empty list like null: removing the header instead of storing a value-less
    // HttpHeader. NOTE(review): confirm no caller relies on creating a header with zero values.
    if (values == null || values.isEmpty()) {
        remove(caseInsensitiveName);
    } else {
        headers.put(caseInsensitiveName, new HttpHeader(name, values));
    }
    return this;
}
/**
 * Sets all provided header key/value pairs into this HttpHeaders instance. This is equivalent
 * to calling {@code headers.forEach(this::set)}: each key creates/replaces a header, and a
 * null values list removes the header with that name.
 *
 * @param headers a map whose keys are header names and whose values are the associated value
 * lists.
 * @return The updated HttpHeaders object
 */
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
 * Gets the {@link HttpHeader header} for the provided header name; lookup is case-insensitive
 * via {@code formatKey}. {@code Null} is returned if the header isn't found.
 *
 * @param name the name of the header to find.
 * @return the header if found, null otherwise.
 */
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
 * Removes the {@link HttpHeader header} with the provided header name; lookup is
 * case-insensitive via {@code formatKey}. {@code Null} is returned if the header isn't found.
 *
 * @param name the name of the header to remove.
 * @return the removed header, or null if no header with that name existed.
 */
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
 * Gets the value for the provided header name. {@code Null} is returned if the header name
 * isn't found.
 *
 * @param name the name of the header whose value is being retrieved.
 * @return the value of the header, or null if the header isn't found
 */
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
 * Gets the values for the provided header name. {@code Null} is returned if the header name
 * isn't found.
 *
 * @param name the name of the header whose values are being retrieved.
 * @return the values of the header, or null if the header isn't found
 */
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
/**
 * Normalizes a header name into the case-insensitive lookup key used by the backing map.
 *
 * @param key The header name; may be null.
 * @return The lower-cased key, or null if {@code key} is null.
 */
private String formatKey(final String key) {
    // Null-guard: callers such as get(name) and remove(name) may receive a null name, which
    // previously propagated here and threw a NullPointerException on toLowerCase.
    return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
 * Returns an unmodifiable snapshot of these headers as a name-to-value map, taken at the time
 * of the call. The snapshot does not reflect later changes to this HttpHeaders instance, and
 * mutating it has no effect on this instance.
 *
 * <p>Prefer the APIs on HttpHeaders itself (e.g. {@code getValue(name)}) over Map operations
 * on the returned copy.</p>
 *
 * @return the headers in a copied and unmodifiable form.
 */
public Map<String, String> toMap() {
    final Map<String, String> snapshot = new HashMap<>();
    headers.values().forEach(header -> snapshot.put(header.getName(), header.getValue()));
    return Collections.unmodifiableMap(snapshot);
}
/**
 * Returns an unmodifiable snapshot of these headers as a name-to-values map, taken at the time
 * of the call. The snapshot does not reflect later changes to this HttpHeaders instance, and
 * mutating it has no effect on this instance.
 *
 * <p>Prefer the APIs on HttpHeaders itself (e.g. {@code getValues(name)}) over Map operations
 * on the returned copy.</p>
 *
 * @return the headers in a copied and unmodifiable form.
 */
public Map<String, String[]> toMultiMap() {
    final Map<String, String[]> snapshot = new HashMap<>();
    headers.values().forEach(header -> snapshot.put(header.getName(), header.getValues()));
    return Collections.unmodifiableMap(snapshot);
}
/**
 * {@inheritDoc}
 *
 * <p>Iterates the live backing map's values; header order is unspecified.</p>
 */
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
 * Gets a {@link Stream} over the {@link HttpHeader} values in this instance. The stream is
 * backed by the live headers map; order is unspecified.
 *
 * @return A {@link Stream} of all header values in this instance.
 */
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
// Renders headers as "name=value" pairs joined by ", "; order is unspecified.
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, value));
}
return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (CoreUtils.isNullOrEmpty(values)) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, values));
}
return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
private String formatKey(final String key) {
return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
final Map<String, String> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValue());
}
return Collections.unmodifiableMap(result);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
final Map<String, String[]> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValues());
}
return Collections.unmodifiableMap(result);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} |
Updated. I didn't use `org.springframework.boot.autoconfigure.info.ProjectInfoAutoConfiguration#loadSource` because it is protected. I used `PropertiesLoaderUtils` and `ClassPathResource` instead. | private static String getVersion() {
String version = "unknown";
Properties properties = new Properties();
try (InputStream inputStream =
new FileInputStream(ResourceUtils.getFile("classpath:azure-spring-boot-version.txt"))) {
properties.load(inputStream);
version = properties.getProperty("version");
} catch (IOException e) {
LOGGER.warn("Can not get version.");
}
return version;
} | try (InputStream inputStream = | private static String getVersion() {
String version = "unknown";
try {
Properties properties = PropertiesLoaderUtils.loadProperties(
new ClassPathResource("project.properties"));
version = properties.getProperty("version");
} catch (IOException e) {
LOGGER.warn("Can not get version.");
}
return version;
} | class ApplicationId {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationId.class);
public static final String VERSION = getVersion();
public static final String AZURE_SPRING_KEY_VAULT = "az-sp-kv/" + VERSION;
public static final String AZURE_SPRING_SERVICE_BUS = "az-sp-bus/" + VERSION;
public static final String AZURE_SPRING_STORAGE_BLOB = "az-sp-sb/" + VERSION;
public static final String AZURE_SPRING_STORAGE_FILES = "az-sp-sf/" + VERSION;
/**
* AZURE_SPRING_AAD does not contain VERSION, because AAD server support 2 headers: 1. x-client-SKU; 2.
* x-client-VER;
*/
public static final String AZURE_SPRING_AAD = "az-sp-aad";
public static final String AZURE_SPRING_B2C = "az-sp-b2c";
} | class ApplicationId {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationId.class);
public static final String VERSION = getVersion();
public static final String AZURE_SPRING_KEY_VAULT = "az-sp-kv/" + VERSION;
public static final String AZURE_SPRING_SERVICE_BUS = "az-sp-bus/" + VERSION;
public static final String AZURE_SPRING_STORAGE_BLOB = "az-sp-sb/" + VERSION;
public static final String AZURE_SPRING_STORAGE_FILES = "az-sp-sf/" + VERSION;
/**
* AZURE_SPRING_AAD does not contain VERSION, because AAD server support 2 headers: 1. x-client-SKU; 2.
* x-client-VER;
*/
public static final String AZURE_SPRING_AAD = "az-sp-aad";
public static final String AZURE_SPRING_B2C = "az-sp-b2c";
} |
yes, that was just a usage example for you | private static String getVersion() {
String version = "unknown";
Properties properties = new Properties();
try (InputStream inputStream =
new FileInputStream(ResourceUtils.getFile("classpath:azure-spring-boot-version.txt"))) {
properties.load(inputStream);
version = properties.getProperty("version");
} catch (IOException e) {
LOGGER.warn("Can not get version.");
}
return version;
} | try (InputStream inputStream = | private static String getVersion() {
String version = "unknown";
try {
Properties properties = PropertiesLoaderUtils.loadProperties(
new ClassPathResource("project.properties"));
version = properties.getProperty("version");
} catch (IOException e) {
LOGGER.warn("Can not get version.");
}
return version;
} | class ApplicationId {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationId.class);
public static final String VERSION = getVersion();
public static final String AZURE_SPRING_KEY_VAULT = "az-sp-kv/" + VERSION;
public static final String AZURE_SPRING_SERVICE_BUS = "az-sp-bus/" + VERSION;
public static final String AZURE_SPRING_STORAGE_BLOB = "az-sp-sb/" + VERSION;
public static final String AZURE_SPRING_STORAGE_FILES = "az-sp-sf/" + VERSION;
/**
* AZURE_SPRING_AAD does not contain VERSION, because AAD server support 2 headers: 1. x-client-SKU; 2.
* x-client-VER;
*/
public static final String AZURE_SPRING_AAD = "az-sp-aad";
public static final String AZURE_SPRING_B2C = "az-sp-b2c";
} | class ApplicationId {
private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationId.class);
public static final String VERSION = getVersion();
public static final String AZURE_SPRING_KEY_VAULT = "az-sp-kv/" + VERSION;
public static final String AZURE_SPRING_SERVICE_BUS = "az-sp-bus/" + VERSION;
public static final String AZURE_SPRING_STORAGE_BLOB = "az-sp-sb/" + VERSION;
public static final String AZURE_SPRING_STORAGE_FILES = "az-sp-sf/" + VERSION;
/**
* AZURE_SPRING_AAD does not contain VERSION, because AAD server support 2 headers: 1. x-client-SKU; 2.
* x-client-VER;
*/
public static final String AZURE_SPRING_AAD = "az-sp-aad";
public static final String AZURE_SPRING_B2C = "az-sp-b2c";
} |
Given that CI is configured to always log INFO level messages from loggers prefixed with `com.azure` is it needed to log to the console as well? Both can be kept, but I'm thinking longer term we have a way to know if we're in a CI context and only log to the logger. | private static void printThreadStacks() {
final StringBuilder dump = new StringBuilder("============= THREAD DUMP START =========");
final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
final ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
for (ThreadInfo threadInfo : threadInfos) {
dump.append('"');
dump.append(threadInfo.getThreadName());
dump.append("\" ");
final Thread.State state = threadInfo.getThreadState();
dump.append("\n java.lang.Thread.State: ");
dump.append(state);
final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
for (final StackTraceElement stackTraceElement : stackTraceElements) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n\n");
}
dump.append("============= THREAD DUMP END =========");
String output = dump.toString();
System.out.println(output);
LOGGER.info(output);
} | System.out.println(output); | private static void printThreadStacks() {
final StringBuilder dump = new StringBuilder("============= THREAD DUMP START =========");
final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
final ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
for (ThreadInfo threadInfo : threadInfos) {
dump.append('"');
dump.append(threadInfo.getThreadName());
dump.append("\" ");
final Thread.State state = threadInfo.getThreadState();
dump.append("\n java.lang.Thread.State: ");
dump.append(state);
final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
for (final StackTraceElement stackTraceElement : stackTraceElements) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n\n");
}
dump.append("============= THREAD DUMP END =========");
String output = dump.toString();
System.out.println(output);
LOGGER.info(output);
} | class ThreadDumper {
private static final ClientLogger LOGGER = new ClientLogger(StorageSpec.class);
private static volatile ScheduledExecutorService executorService;
private static final int INITIAL_DELAY_IN_MINUTES = 30;
private static final int RATE_IN_MINUTES = 1;
/**
 * Starts the single-threaded scheduler that periodically dumps all thread stacks.
 * Idempotent: only the first caller creates the scheduler; later calls are no-ops.
 */
public static void initialize() {
// Double-checked locking; safe because executorService is declared volatile.
if (executorService == null) {
synchronized (ThreadDumper.class) {
if (executorService == null) {
// Daemon worker thread so the dumper never prevents JVM exit.
executorService = Executors.newScheduledThreadPool(1, r -> {
Thread t = new Thread(r);
t.setDaemon(true);
return t;
});
// Best-effort cleanup of the scheduler on JVM shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(
() -> executorService.shutdown()
));
// First dump after INITIAL_DELAY_IN_MINUTES, then one every RATE_IN_MINUTES.
executorService.scheduleAtFixedRate(
ThreadDumper::printThreadStacks,
INITIAL_DELAY_IN_MINUTES,
RATE_IN_MINUTES,
TimeUnit.MINUTES
);
}
}
}
}
} | class ThreadDumper {
private static final ClientLogger LOGGER = new ClientLogger(StorageSpec.class);
private static volatile ScheduledExecutorService executorService;
private static final int INITIAL_DELAY_IN_MINUTES = 30;
private static final int RATE_IN_MINUTES = 1;
/**
 * Starts the single-threaded scheduler that periodically dumps all thread stacks.
 * Idempotent: only the first caller creates the scheduler; later calls are no-ops.
 */
public static void initialize() {
// Double-checked locking; safe because executorService is declared volatile.
if (executorService == null) {
synchronized (ThreadDumper.class) {
if (executorService == null) {
// Daemon worker thread so the dumper never prevents JVM exit.
executorService = Executors.newScheduledThreadPool(1, r -> {
Thread t = new Thread(r);
t.setDaemon(true);
return t;
});
// Best-effort cleanup of the scheduler on JVM shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(
() -> executorService.shutdown()
));
// First dump after INITIAL_DELAY_IN_MINUTES, then one every RATE_IN_MINUTES.
executorService.scheduleAtFixedRate(
ThreadDumper::printThreadStacks,
INITIAL_DELAY_IN_MINUTES,
RATE_IN_MINUTES,
TimeUnit.MINUTES
);
}
}
}
}
} |
this is one of the things I'd like to see right away in the DevOps portal without downloading a file. (Storage tests don't stream logger outputs to DevOps stdout at this time.) console -> important things; files -> all (including the above). | private static void printThreadStacks() {
final StringBuilder dump = new StringBuilder("============= THREAD DUMP START =========");
final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
final ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
for (ThreadInfo threadInfo : threadInfos) {
dump.append('"');
dump.append(threadInfo.getThreadName());
dump.append("\" ");
final Thread.State state = threadInfo.getThreadState();
dump.append("\n java.lang.Thread.State: ");
dump.append(state);
final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
for (final StackTraceElement stackTraceElement : stackTraceElements) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n\n");
}
dump.append("============= THREAD DUMP END =========");
String output = dump.toString();
System.out.println(output);
LOGGER.info(output);
} | System.out.println(output); | private static void printThreadStacks() {
final StringBuilder dump = new StringBuilder("============= THREAD DUMP START =========");
final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
final ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
for (ThreadInfo threadInfo : threadInfos) {
dump.append('"');
dump.append(threadInfo.getThreadName());
dump.append("\" ");
final Thread.State state = threadInfo.getThreadState();
dump.append("\n java.lang.Thread.State: ");
dump.append(state);
final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
for (final StackTraceElement stackTraceElement : stackTraceElements) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n\n");
}
dump.append("============= THREAD DUMP END =========");
String output = dump.toString();
System.out.println(output);
LOGGER.info(output);
} | class ThreadDumper {
private static final ClientLogger LOGGER = new ClientLogger(StorageSpec.class);
private static volatile ScheduledExecutorService executorService;
private static final int INITIAL_DELAY_IN_MINUTES = 30;
private static final int RATE_IN_MINUTES = 1;
/**
 * Starts the single-threaded scheduler that periodically dumps all thread stacks.
 * Idempotent: only the first caller creates the scheduler; later calls are no-ops.
 */
public static void initialize() {
// Double-checked locking; safe because executorService is declared volatile.
if (executorService == null) {
synchronized (ThreadDumper.class) {
if (executorService == null) {
// Daemon worker thread so the dumper never prevents JVM exit.
executorService = Executors.newScheduledThreadPool(1, r -> {
Thread t = new Thread(r);
t.setDaemon(true);
return t;
});
// Best-effort cleanup of the scheduler on JVM shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(
() -> executorService.shutdown()
));
// First dump after INITIAL_DELAY_IN_MINUTES, then one every RATE_IN_MINUTES.
executorService.scheduleAtFixedRate(
ThreadDumper::printThreadStacks,
INITIAL_DELAY_IN_MINUTES,
RATE_IN_MINUTES,
TimeUnit.MINUTES
);
}
}
}
}
} | class ThreadDumper {
private static final ClientLogger LOGGER = new ClientLogger(StorageSpec.class);
private static volatile ScheduledExecutorService executorService;
private static final int INITIAL_DELAY_IN_MINUTES = 30;
private static final int RATE_IN_MINUTES = 1;
/**
 * Starts the single-threaded scheduler that periodically dumps all thread stacks.
 * Idempotent: only the first caller creates the scheduler; later calls are no-ops.
 */
public static void initialize() {
// Double-checked locking; safe because executorService is declared volatile.
if (executorService == null) {
synchronized (ThreadDumper.class) {
if (executorService == null) {
// Daemon worker thread so the dumper never prevents JVM exit.
executorService = Executors.newScheduledThreadPool(1, r -> {
Thread t = new Thread(r);
t.setDaemon(true);
return t;
});
// Best-effort cleanup of the scheduler on JVM shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(
() -> executorService.shutdown()
));
// First dump after INITIAL_DELAY_IN_MINUTES, then one every RATE_IN_MINUTES.
executorService.scheduleAtFixedRate(
ThreadDumper::printThreadStacks,
INITIAL_DELAY_IN_MINUTES,
RATE_IN_MINUTES,
TimeUnit.MINUTES
);
}
}
}
}
} |
Yes, there is a remove method: https://github.com/Azure/azure-sdk-for-java/pull/23888/files/129fffc730b6ceadce1d77d7f80b5016ef798140#diff-21b21d909779e219c34ba1995c680ce1781b4db6bfec0a728055e8532fe36aa1L160 | public HttpHeaders add(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
return this;
} else {
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
}
return this;
} | } else { | public HttpHeaders add(String name, String value) {
String caseInsensitiveName = formatKey(name);
if (caseInsensitiveName == null || value == null) {
return this;
}
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
return this;
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
    // Empty backing map; entries are added lazily via set/add.
    this.headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
// Pre-sized copy; routing through set(...) normalizes every key via formatKey.
// NOTE(review): a null map throws NPE at headers.size() — confirm callers guarantee non-null.
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
    this.headers = new HashMap<>();
    // Route every entry through set(...) so names are normalized consistently.
    // Note: only getValue() of each incoming header is copied, exactly as before.
    headers.forEach(h -> set(h.getName(), h.getValue()));
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
// Pre-sizes the backing map; callers that know the header count avoid rehashing.
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
// One map entry per distinct (case-insensitive) header name.
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
    // Deprecated alias retained for compatibility; identical to set(name, value).
    return this.set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
    if (name == null) {
        return this;
    }
    String key = formatKey(name);
    if (value != null) {
        headers.put(key, new HttpHeader(name, value));
    } else {
        // A null value is a removal request.
        remove(key);
    }
    return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
    if (name == null) {
        return this;
    }
    String caseInsensitiveName = formatKey(name);
    // Treat an empty list like null: there is no value to expose, so remove the
    // header instead of storing one with zero values (matches the corrected
    // behavior that uses CoreUtils.isNullOrEmpty elsewhere in this codebase).
    if (values == null || values.isEmpty()) {
        remove(caseInsensitiveName);
    } else {
        headers.put(caseInsensitiveName, new HttpHeader(name, values));
    }
    return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
    // Delegate each pair to set(...), which handles null values as removals.
    for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
        set(entry.getKey(), entry.getValue());
    }
    return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
// formatKey lower-cases the name, giving case-insensitive lookup.
// NOTE(review): formatKey NPEs on a null name here — confirm callers never pass null.
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
// Case-insensitive removal; returns the removed header or null when absent.
// NOTE(review): formatKey NPEs on a null name here — confirm callers never pass null.
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
    // Null-safe unwrap of the (possibly absent) header.
    HttpHeader header = get(name);
    if (header == null) {
        return null;
    }
    return header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
    // Null-safe unwrap of the (possibly absent) header.
    HttpHeader match = get(name);
    return (match != null) ? match.getValues() : null;
}
/**
 * Normalizes a header name for case-insensitive map lookups.
 *
 * @param key the header name, may be {@code null}.
 * @return the lower-cased name, or {@code null} when {@code key} is null.
 */
private String formatKey(final String key) {
    // Guard against null so get(null)/remove(null)/getValue(null) no longer throw
    // NullPointerException; HashMap tolerates a null key, so the lookup is benign.
    // Locale.ROOT keeps casing locale-independent (e.g. avoids Turkish dotless-i).
    return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
    // Snapshot copy: later mutations of this HttpHeaders do not affect the result.
    Map<String, String> snapshot = new HashMap<>();
    headers.values().forEach(header -> snapshot.put(header.getName(), header.getValue()));
    return Collections.unmodifiableMap(snapshot);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
    // Snapshot copy keyed by original header name, preserving all values.
    Map<String, String[]> snapshot = new HashMap<>();
    headers.values().forEach(header -> snapshot.put(header.getName(), header.getValues()));
    return Collections.unmodifiableMap(snapshot);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
// Live view over the backing map's values; remove() via this iterator mutates the headers.
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
// Backed by the live values view; do not mutate headers while consuming the stream.
return headers.values().stream();
}
@Override
public String toString() {
    // Comma-separated "name=value" pairs in the backing map's iteration order.
    StringBuilder sb = new StringBuilder();
    for (HttpHeader header : headers.values()) {
        if (sb.length() > 0) {
            sb.append(", ");
        }
        sb.append(header.getName()).append('=').append(header.getValue());
    }
    return sb.toString();
}
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
// A null name is a documented no-op.
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
// A null value removes the header; otherwise it replaces any existing value.
if (value == null) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, value));
}
return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
// A null name is a documented no-op.
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
// Null or empty values remove the header rather than storing a valueless entry.
if (CoreUtils.isNullOrEmpty(values)) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, values));
}
return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
// Normalizes a header name for case-insensitive lookup. Null-safe so that
// get(null)/remove(null) do not throw; Locale.ROOT keeps casing locale-independent.
private String formatKey(final String key) {
return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
final Map<String, String> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValue());
}
return Collections.unmodifiableMap(result);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
final Map<String, String[]> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValues());
}
return Collections.unmodifiableMap(result);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} |
we can combine this with the first null check - `if(name == null || value == null) return this;`. Saves formatting the name. | public HttpHeaders add(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
return this;
} else {
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
}
return this;
} | if (value == null) { | public HttpHeaders add(String name, String value) {
String caseInsensitiveName = formatKey(name);
if (caseInsensitiveName == null || value == null) {
return this;
}
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
return this;
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
    // A null name is a no-op; a null value removes the header.
    if (name != null) {
        String normalized = formatKey(name);
        if (value == null) {
            remove(normalized);
        } else {
            headers.put(normalized, new HttpHeader(name, value));
        }
    }
    return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
    if (name == null) {
        return this;
    }
    String caseInsensitiveName = formatKey(name);
    // Treat an empty list like null: there is no value to expose, so remove the
    // header instead of storing one with zero values (matches the corrected
    // behavior that uses CoreUtils.isNullOrEmpty elsewhere in this codebase).
    if (values == null || values.isEmpty()) {
        remove(caseInsensitiveName);
    } else {
        headers.put(caseInsensitiveName, new HttpHeader(name, values));
    }
    return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
/**
 * Normalizes a header name for case-insensitive map lookups.
 *
 * @param key the header name, may be {@code null}.
 * @return the lower-cased name, or {@code null} when {@code key} is null.
 */
private String formatKey(final String key) {
    // Guard against null so get(null)/remove(null)/getValue(null) no longer throw
    // NullPointerException; HashMap tolerates a null key, so the lookup is benign.
    // Locale.ROOT keeps casing locale-independent (e.g. avoids Turkish dotless-i).
    return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
final Map<String, String> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValue());
}
return Collections.unmodifiableMap(result);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
final Map<String, String[]> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValues());
}
return Collections.unmodifiableMap(result);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, value));
}
return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (CoreUtils.isNullOrEmpty(values)) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, values));
}
return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
private String formatKey(final String key) {
return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
final Map<String, String> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValue());
}
return Collections.unmodifiableMap(result);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
final Map<String, String[]> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValues());
}
return Collections.unmodifiableMap(result);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} |
You should be able to null check on the `name` itself, and move the if clause above the `formatKey` call. | public HttpHeaders add(String name, String value) {
String caseInsensitiveName = formatKey(name);
if (caseInsensitiveName == null || value == null) {
return this;
}
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
return this;
} | } | public HttpHeaders add(String name, String value) {
String caseInsensitiveName = formatKey(name);
if (caseInsensitiveName == null || value == null) {
return this;
}
headers.compute(caseInsensitiveName, (key, header) -> {
if (header == null) {
return new HttpHeader(name, value);
} else {
header.addValue(value);
return header;
}
});
return this;
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, value));
}
return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (CoreUtils.isNullOrEmpty(values)) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, values));
}
return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
private String formatKey(final String key) {
return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
final Map<String, String> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValue());
}
return Collections.unmodifiableMap(result);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
final Map<String, String[]> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValues());
}
return Collections.unmodifiableMap(result);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} | class HttpHeaders implements Iterable<HttpHeader> {
private final Map<String, HttpHeader> headers;
/**
* Create an empty HttpHeaders instance.
*/
public HttpHeaders() {
headers = new HashMap<>();
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the map of initial headers
*/
public HttpHeaders(Map<String, String> headers) {
this.headers = new HashMap<>(headers.size());
headers.forEach(this::set);
}
/**
* Create a HttpHeaders instance with the provided initial headers.
*
* @param headers the collection of initial headers
*/
public HttpHeaders(Iterable<HttpHeader> headers) {
this.headers = new HashMap<>();
for (final HttpHeader header : headers) {
this.set(header.getName(), header.getValue());
}
}
/**
* Create a HttpHeaders instance with an initial {@code size} empty headers
*
* @param initialCapacity the initial capacity of headers map.
*/
public HttpHeaders(int initialCapacity) {
this.headers = new HashMap<>(initialCapacity);
}
/**
* Gets the number of headers in the collection.
*
* @return the number of headers in this collection.
*/
public int getSize() {
return headers.size();
}
/**
* Adds a {@link HttpHeader header} with the given name and value if a header with that name doesn't already exist,
* otherwise adds the {@code value} to the existing header.
*
* @param name The name of the header.
* @param value The value of the header.
* @return The updated HttpHeaders object.
*/
/**
* Sets a {@link HttpHeader header} with the given name and value.
*
* <p>If header with same name already exists then the value will be overwritten.</p>
*
* @param name the name
* @param value the value
* @return The updated HttpHeaders object
* @deprecated Use {@link
*/
@Deprecated
public HttpHeaders put(String name, String value) {
return set(name, value);
}
/**
* Sets a {@link HttpHeader header} with the given name and value. If a header with same name already exists then
* the value will be overwritten. If the given value is null, the header with the given name will be removed.
*
* @param name the name to set in the header. If it is null, this method will return with no changes to the
* headers.
* @param value the value
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, String value) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (value == null) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, value));
}
return this;
}
/**
* Sets a {@link HttpHeader header} with the given name and the list of values provided, such that the given values
* will be comma-separated when necessary. If a header with same name already exists then the values will be
* overwritten. If the given values list is null, the header with the given name will be removed.
*
* @param name the name
* @param values the values that will be comma-separated as appropriate
* @return The updated HttpHeaders object
*/
public HttpHeaders set(String name, List<String> values) {
if (name == null) {
return this;
}
String caseInsensitiveName = formatKey(name);
if (CoreUtils.isNullOrEmpty(values)) {
remove(caseInsensitiveName);
} else {
headers.put(caseInsensitiveName, new HttpHeader(name, values));
}
return this;
}
/**
* Sets all provided header key/values pairs into this HttpHeaders instance. This is equivalent to calling {@code
* headers.forEach(this::set)}, and therefore the behavior is as specified in {@link
* words, this will create a header for each key in the provided map, replacing or removing an existing one,
* depending on the value. If the given values list is null, the header with the given name will be removed. If the
* given name is already a header, it will be removed and replaced with the headers provided.
*
* @param headers a map containing keys representing header names, and keys representing the associated values.
* @return The updated HttpHeaders object
*/
public HttpHeaders setAll(Map<String, List<String>> headers) {
headers.forEach(this::set);
return this;
}
/**
* Gets the {@link HttpHeader header} for the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to find.
* @return the header if found, null otherwise.
*/
public HttpHeader get(String name) {
return headers.get(formatKey(name));
}
/**
* Removes the {@link HttpHeader header} with the provided header name. {@code Null} is returned if the header isn't
* found.
*
* @param name the name of the header to remove.
* @return the header if removed, null otherwise.
*/
public HttpHeader remove(String name) {
return headers.remove(formatKey(name));
}
/**
* Get the value for the provided header name. {@code Null} is returned if the header name isn't found.
*
* @param name the name of the header whose value is being retrieved.
* @return the value of the header, or null if the header isn't found
*/
public String getValue(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValue();
}
/**
* Get the values for the provided header name. {@code Null} is returned if the header name isn't found.
*
* <p>This returns {@link
*
* @param name the name of the header whose value is being retrieved.
* @return the values of the header, or null if the header isn't found
*/
public String[] getValues(String name) {
final HttpHeader header = get(name);
return header == null ? null : header.getValues();
}
private String formatKey(final String key) {
return (key == null) ? null : key.toLowerCase(Locale.ROOT);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String> toMap() {
final Map<String, String> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValue());
}
return Collections.unmodifiableMap(result);
}
/**
* Returns a copy of the http headers as an unmodifiable {@link Map} representation of the state of the headers at
* the time of the toMultiMap call. This map will not change as the underlying http headers change, and nor will
* modifying the key or values contained in the map have any effect on the state of the http headers.
*
* <p>Note that there may be performance implications of using Map APIs on the returned Map. It is highly
* recommended that users prefer to use alternate APIs present on the HttpHeaders class, over using APIs present on
* the returned Map class. For example, use the {@link
* httpHeaders.toMap().get(name)}.</p>
*
* @return the headers in a copied and unmodifiable form.
*/
public Map<String, String[]> toMultiMap() {
final Map<String, String[]> result = new HashMap<>();
for (final HttpHeader header : headers.values()) {
result.put(header.getName(), header.getValues());
}
return Collections.unmodifiableMap(result);
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<HttpHeader> iterator() {
return headers.values().iterator();
}
/**
* Get a {@link Stream} representation of the HttpHeader values in this instance.
*
* @return A {@link Stream} of all header values in this instance.
*/
public Stream<HttpHeader> stream() {
return headers.values().stream();
}
@Override
public String toString() {
return this.stream()
.map(header -> header.getName() + "=" + header.getValue())
.collect(Collectors.joining(", "));
}
} |
Hello world! ??? | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Hello world! Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | String body = String.format("Hello world! Number: %s", number); | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
sample in azure-messaging-eventhubs uses 'Hello World' as event body as well, so I keep the name. We can change it to `String body = String.format("Custom event number: %s", number);` | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Hello world! Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | String body = String.format("Hello world! Number: %s", number); | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
`EventHubsManager` seems not needed? `AzureResourceManager` should contains it. Also you can see if they can share a same `HttpClient` instance. | public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
} | .authenticate(credential, profile); | public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Should use the `region` here? | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create(); | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Maybe you can add some comment about partition size. | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | .withPartitionCount(1) | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Ok, `.subscribe` is fine. This part actually happen after the `sendEvent`. You cannot block here. | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | .subscribe(partitionEvent -> { | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
This consuming part is referred to this sample: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/eventhubs/azure-messaging-eventhubs/src/samples/java/com/azure/messaging/eventhubs/ConsumeEvents.java | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | .subscribe(partitionEvent -> { | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
updated | public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
} | .authenticate(credential, profile); | public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
updated | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create(); | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
updated | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(Region.US_EAST).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = eventHubsManager.namespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | .withPartitionCount(1) | private static void runSample() {
try {
ResourceGroup resourceGroup =
resourceManager.resourceGroups().define(resourceGroupName).withRegion(region).create();
System.out.println("Resource group created with name " + resourceGroupName);
EventHubNamespace namespace = resourceManager.eventHubNamespaces()
.define(eventHubNamespace)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.withAutoScaling()
.withSku(EventHubNamespaceSkuType.STANDARD)
.create();
System.out.println("EventHub namespace created with name " + namespace.name());
EventHub eventHub = resourceManager.eventHubs()
.define(eventHubName)
.withExistingNamespace(resourceGroupName, eventHubNamespace)
.withNewManageRule(eventHubRuleName)
.withPartitionCount(1)
.create();
System.out.println("EventHub created with name " + eventHub.name());
Topic eventGridTopic = eventGridManager.topics()
.define(topicName)
.withRegion(region)
.withExistingResourceGroup(resourceGroupName)
.create();
System.out.println("EventGrid topic created with name " + eventGridTopic.name());
EventSubscription eventSubscription = eventGridManager.eventSubscriptions()
.define(eventSubscriptionName)
.withExistingScope(eventGridTopic.id())
.withDestination(new EventHubEventSubscriptionDestination()
.withResourceId(eventHub.id()))
.withFilter(new EventSubscriptionFilter()
.withIsSubjectCaseSensitive(false)
.withSubjectBeginsWith("")
.withSubjectEndsWith(""))
.create();
System.out.println("EventGrid event subscription created with name " + eventSubscription.name());
String eventGridClientConnectionKey = eventGridManager.topics().listSharedAccessKeys(resourceGroupName, topicName).key1();
System.out.format("Found EventGrid client connection key \"%s\" for endpoint \"%s\"\n", eventGridClientConnectionKey, eventGridTopic.endpoint());
EventGridPublisherClient<EventGridEvent> eventGridPublisherClient = new EventGridPublisherClientBuilder()
.endpoint(eventGridTopic.endpoint())
.credential(new AzureKeyCredential(eventGridClientConnectionKey))
.buildEventGridEventPublisherClient();
System.out.println("Done creating event grid publisher client.");
String connectionString = eventHub.listAuthorizationRules().stream().findFirst().get().getKeys().primaryConnectionString();
System.out.format("Event hub connection string: %s%n", connectionString);
EventHubConsumerAsyncClient consumer = new EventHubClientBuilder()
.connectionString(connectionString)
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.buildAsyncConsumerClient();
System.out.println("Done creating event hub consumer client.");
String firstPartition = consumer.getPartitionIds().blockFirst(OPERATION_TIMEOUT);
if (firstPartition == null) {
firstPartition = "0";
}
CountDownLatch countDownLatch = new CountDownLatch(NUMBER_OF_EVENTS);
Disposable subscription = consumer.receiveFromPartition(firstPartition, EventPosition.latest())
.subscribe(partitionEvent -> {
EventData eventData = partitionEvent.getData();
String contents = new String(eventData.getBody(), UTF_8);
countDownLatch.countDown();
System.out.printf("Event received. Event sequence number number: %s. Contents: %s%n",
eventData.getSequenceNumber(), contents);
},
error -> {
System.err.println("Error occurred while consuming events: " + error);
while (countDownLatch.getCount() > 0) {
countDownLatch.countDown();
}
}, () -> {
System.out.println("Finished reading events.");
});
Flux.range(0, NUMBER_OF_EVENTS)
.doOnNext(number -> {
String body = String.format("Custom Event Number: %s", number);
EventGridEvent event = new EventGridEvent("com/example/MyApp", "User.Created.Text", BinaryData.fromObject(body), "0.1");
eventGridPublisherClient.sendEvent(event);
System.out.format("Done publishing event: %s.%n", body);
})
.doOnComplete(() -> System.out.println("Done publishing events using event grid."))
.blockLast();
subscription.dispose();
consumer.close();
boolean isSuccessful = countDownLatch.await(OPERATION_TIMEOUT.getSeconds(), TimeUnit.SECONDS);
if (!isSuccessful) {
System.err.printf("Did not complete successfully. There are: %s events left.%n",
countDownLatch.getCount());
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static EventHubsManager eventHubsManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
resourceManager = AzureResourceManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventHubsManager = EventHubsManager.configure()
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile);
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class EventGridPublishAndConsumeExample {
private static AzureResourceManager resourceManager;
private static EventGridManager eventGridManager;
private static final Random RANDOM = new Random();
private static final int NUMBER_OF_EVENTS = 10;
private static final Region region = Region.US_WEST2;
private static final String resourceGroupName = "rg" + randomPadding();
private static final String eventHubName = "eh" + randomPadding();
private static final String eventHubNamespace = "eh-namespace" + randomPadding();
private static final String topicName = "my-topic-name" + randomPadding();
private static final String eventSubscriptionName = "event-subscription" + randomPadding();
private static final String eventHubRuleName = "my-management-rule" + randomPadding();
/**
* Main entry point.
*
* @param args the parameters
*/
public static void main(String[] args) {
try {
TokenCredential credential = new EnvironmentCredentialBuilder()
.authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)
.build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
HttpClient httpClient = HttpClient.createDefault();
resourceManager = AzureResourceManager.configure()
.withHttpClient(httpClient)
.withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.authenticate(credential, profile)
.withDefaultSubscription();
eventGridManager = EventGridManager.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.withHttpClient(httpClient)
.authenticate(credential, profile);
runSample();
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
trying to understand the `else` branch. in which scenario we go to the `else` branch? `itemBatchOperation.getItem() != null` which scenario is this? is this for `delete` and `read` only? | public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch, CosmosBatchRequestOptions requestOptions) {
if (requestOptions == null) {
requestOptions = new CosmosBatchRequestOptions();
}
List<Mono<ItemBatchOperation<?>>> monoList = new ArrayList<>();
for (ItemBatchOperation<?> itemBatchOperation : this.cosmosBatchAccessor.getOperationsInternal(cosmosBatch)) {
Mono<ItemBatchOperation<?>> itemBatchOperationMono = null;
if (itemBatchOperation.getItem() != null) {
ObjectNode objectNode =
EncryptionUtils.getSimpleObjectMapper().valueToTree(itemBatchOperation.getItem());
itemBatchOperationMono =
encryptionProcessor.encryptObjectNode(objectNode).map(encryptedItem -> {
return new ItemBatchOperation(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
encryptedItem
);
});
} else {
itemBatchOperationMono =
Mono.just(
new ItemBatchOperation(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
null
)
);
}
monoList.add(itemBatchOperationMono);
}
Mono<List<ItemBatchOperation<?>>> encryptedOperationListMono =
Flux.mergeSequential(monoList).collectList();
CosmosBatchRequestOptions finalRequestOptions = requestOptions;
CosmosBatch encryptedCosmosBatch = CosmosBatch.createCosmosBatch(cosmosBatch.getPartitionKeyValue());
return encryptedOperationListMono.flatMap(itemBatchOperations -> {
this.cosmosBatchAccessor.getOperationsInternal(encryptedCosmosBatch).addAll(itemBatchOperations);
return executeCosmosBatchHelper(encryptedCosmosBatch, finalRequestOptions, false);
});
} | } else { | public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch, CosmosBatchRequestOptions requestOptions) {
if (requestOptions == null) {
requestOptions = new CosmosBatchRequestOptions();
}
List<Mono<ItemBatchOperation<?>>> monoList = new ArrayList<>();
for (ItemBatchOperation<?> itemBatchOperation : this.cosmosBatchAccessor.getOperationsInternal(cosmosBatch)) {
Mono<ItemBatchOperation<?>> itemBatchOperationMono = null;
if (itemBatchOperation.getItem() != null) {
ObjectNode objectNode =
EncryptionUtils.getSimpleObjectMapper().valueToTree(itemBatchOperation.getItem());
itemBatchOperationMono =
encryptionProcessor.encryptObjectNode(objectNode).map(encryptedItem -> {
return new ItemBatchOperation<>(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
encryptedItem
);
});
} else {
itemBatchOperationMono =
Mono.just(
new ItemBatchOperation<>(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
null
)
);
}
monoList.add(itemBatchOperationMono);
}
Mono<List<ItemBatchOperation<?>>> encryptedOperationListMono =
Flux.mergeSequential(monoList).collectList();
CosmosBatchRequestOptions finalRequestOptions = requestOptions;
CosmosBatch encryptedCosmosBatch = CosmosBatch.createCosmosBatch(cosmosBatch.getPartitionKeyValue());
return encryptedOperationListMono.flatMap(itemBatchOperations -> {
this.cosmosBatchAccessor.getOperationsInternal(encryptedCosmosBatch).addAll(itemBatchOperations);
return executeCosmosBatchHelper(encryptedCosmosBatch, finalRequestOptions, false);
});
} | class type.
* @return a {@link CosmosPagedFlux} | class type.
* @return a {@link CosmosPagedFlux} |
Yes read and delete do not have the item object in itemBatchOperation | public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch, CosmosBatchRequestOptions requestOptions) {
if (requestOptions == null) {
requestOptions = new CosmosBatchRequestOptions();
}
List<Mono<ItemBatchOperation<?>>> monoList = new ArrayList<>();
for (ItemBatchOperation<?> itemBatchOperation : this.cosmosBatchAccessor.getOperationsInternal(cosmosBatch)) {
Mono<ItemBatchOperation<?>> itemBatchOperationMono = null;
if (itemBatchOperation.getItem() != null) {
ObjectNode objectNode =
EncryptionUtils.getSimpleObjectMapper().valueToTree(itemBatchOperation.getItem());
itemBatchOperationMono =
encryptionProcessor.encryptObjectNode(objectNode).map(encryptedItem -> {
return new ItemBatchOperation(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
encryptedItem
);
});
} else {
itemBatchOperationMono =
Mono.just(
new ItemBatchOperation(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
null
)
);
}
monoList.add(itemBatchOperationMono);
}
Mono<List<ItemBatchOperation<?>>> encryptedOperationListMono =
Flux.mergeSequential(monoList).collectList();
CosmosBatchRequestOptions finalRequestOptions = requestOptions;
CosmosBatch encryptedCosmosBatch = CosmosBatch.createCosmosBatch(cosmosBatch.getPartitionKeyValue());
return encryptedOperationListMono.flatMap(itemBatchOperations -> {
this.cosmosBatchAccessor.getOperationsInternal(encryptedCosmosBatch).addAll(itemBatchOperations);
return executeCosmosBatchHelper(encryptedCosmosBatch, finalRequestOptions, false);
});
} | } else { | public Mono<CosmosBatchResponse> executeCosmosBatch(CosmosBatch cosmosBatch, CosmosBatchRequestOptions requestOptions) {
if (requestOptions == null) {
requestOptions = new CosmosBatchRequestOptions();
}
List<Mono<ItemBatchOperation<?>>> monoList = new ArrayList<>();
for (ItemBatchOperation<?> itemBatchOperation : this.cosmosBatchAccessor.getOperationsInternal(cosmosBatch)) {
Mono<ItemBatchOperation<?>> itemBatchOperationMono = null;
if (itemBatchOperation.getItem() != null) {
ObjectNode objectNode =
EncryptionUtils.getSimpleObjectMapper().valueToTree(itemBatchOperation.getItem());
itemBatchOperationMono =
encryptionProcessor.encryptObjectNode(objectNode).map(encryptedItem -> {
return new ItemBatchOperation<>(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
encryptedItem
);
});
} else {
itemBatchOperationMono =
Mono.just(
new ItemBatchOperation<>(
itemBatchOperation.getOperationType(),
itemBatchOperation.getId(),
itemBatchOperation.getPartitionKeyValue(),
itemBatchOperation.getRequestOptions(),
null
)
);
}
monoList.add(itemBatchOperationMono);
}
Mono<List<ItemBatchOperation<?>>> encryptedOperationListMono =
Flux.mergeSequential(monoList).collectList();
CosmosBatchRequestOptions finalRequestOptions = requestOptions;
CosmosBatch encryptedCosmosBatch = CosmosBatch.createCosmosBatch(cosmosBatch.getPartitionKeyValue());
return encryptedOperationListMono.flatMap(itemBatchOperations -> {
this.cosmosBatchAccessor.getOperationsInternal(encryptedCosmosBatch).addAll(itemBatchOperations);
return executeCosmosBatchHelper(encryptedCosmosBatch, finalRequestOptions, false);
});
} | class type.
* @return a {@link CosmosPagedFlux} | class type.
* @return a {@link CosmosPagedFlux} |
could you validate the number of the returned count? | public void queryItemsAggregate() {
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
assertThat(feedResponse1.size()).isGreaterThanOrEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1);
} | } | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
properties = getItem(UUID.randomUUID().toString());
String longString = "";
for (int i = 0; i < 10000; i++) {
longString += "a";
}
properties.setSensitiveString(longString);
itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
// Creates an encrypted item, then queries for it by id and verifies the decrypted
// result matches the payload returned by the create call.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
// NOTE(review): blockFirst() is @Nullable — an empty Flux would surface here as an
// NPE rather than an assertion failure; consider deferring the block() per review.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
// The first page may contain other matches; only validate the item created above.
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
// Fix: removed the duplicated @Test annotation — TestNG's @Test is not a repeatable
// annotation, so declaring it twice on one method does not compile.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // Create an item, then query it back with parameters bound to deterministic
    // encrypted paths (/sensitiveString, /sensitiveLong) via SqlQuerySpecWithEncryption.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // @nonSensitive is a plain parameter; the two sensitive ones are registered with
    // the encryption-aware spec so their values are encrypted before being sent.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    // Binding a query parameter to a RANDOMIZED-encrypted path (/sensitiveDouble)
    // must be rejected: randomized ciphertext is not equality-comparable.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Fix: dropped the unused 'feedResponse' local — executing the pipeline is
        // all that is needed to trigger the expected rejection.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Insert three items, then drain the matching query one page at a time using
    // continuation tokens, asserting each page honors the requested page size.
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Fix: pass pageSize instead of a hard-coded 1 so the requested page size
        // stays consistent with the isEqualTo(pageSize) assertion below.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// A container whose client encryption policy advertises a format version greater
// than 1 (set up via reflection in before_CosmosItemTest) must be rejected by this
// version of the encryption library on the first CRUD call.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
// End-to-end stale-cache scenario: repeatedly deletes and recreates the database /
// container behind an existing CosmosEncryptionAsyncContainer proxy and verifies that
// every operation type (create, read, query, upsert, replace, paged query,
// queryItemsOnEncryptedProperties, batch) still works — i.e. cached encryption
// settings are refreshed rather than used stale. Statement order is significant
// throughout; each delete/recreate intentionally invalidates the previous state.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
// Stage 1: fresh database + container, baseline create/query/read through the proxy.
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Stage 2: drop and recreate the whole database; the old proxy must still create.
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
// Stage 3: recreate the container with a DIFFERENT policy (only /sensitiveString
// encrypted); a new proxy writes under the new policy while the old proxy reads.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// Reading via the RAW container: only /sensitiveString should still be ciphertext.
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Stage 4: recreate container again; upsert through the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Stage 5: recreate container; create via a new proxy, replace via the stale one.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// NOTE(review): this re-asserts upsertResponse; it likely should assert
// replaceResponse.getRequestCharge() — confirm intent before changing.
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Stage 6: recreate container, seed 10 items via a new proxy, then page through a
// full scan with the stale proxy using continuation tokens.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
// Stage 7: recreate container; queryItemsOnEncryptedProperties via the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
// Stage 8: recreate container; transactional batch (create + read) via stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
try {
// Best-effort cleanup of the per-test database; failures are deliberately ignored.
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
    // Creating a client encryption key with an unknown algorithm name must be
    // rejected client-side with an IllegalArgumentException.
    try {
        TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
        EncryptionKeyWrapMetadata wrapMetadata =
            new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1",
                "tempmetadata1");
        this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
            "InvalidAlgorithm", wrapMetadata).block();
        fail("client encryption key create should fail on invalid algorithm");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
    }
}
// Executes a 5-operation transactional batch (create, replace, upsert, read, delete)
// on the encryption container and verifies per-operation status codes and payloads.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
// Results are positional: 0=create, 1=replace, 2=upsert, 3=read, 4=delete.
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// Delete (index 4) returns no body, so only the first four payloads are validated.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Same 5-operation batch as batchExecution(), but every operation is queued through
// the overloads that accept a CosmosBatchItemRequestOptions argument.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
// Results are positional: 0=create, 1=replace, 2=upsert, 3=read, 4=delete.
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// Delete (index 4) returns no body, so only the first four payloads are validated.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Validates an item written under a policy that encrypts ONLY /sensitiveString and
// then read back without decryption: sensitiveString must NOT equal the original
// plaintext (it is still ciphertext), while every other field round-trips unchanged.
// Assertion order is kept as-is so the first divergence is reported deterministically.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The single encrypted path — stored value is ciphertext, so it must differ.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    // Minimal encryption policy: a single deterministic path (/sensitiveString)
    // backed by client encryption key "key1".
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    // Returned as a mutable list so callers may extend it.
    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
// Creates a container (partition key path "/mypk") carrying the given client-side
// encryption policy, blocking until the service call completes.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
// Creates a database plus two client encryption keys ("key1" and "key2") wrapped via
// the test key-store provider; the encryption policies used in this suite reference
// these key ids (e.g. getPathWithOneEncryptionField uses "key1").
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
/**
 * Builds a container proxy backed by a brand-new CosmosAsyncClient so the returned
 * object carries no previously cached encryption settings (used by the stale-cache
 * tests to write/read alongside an older proxy).
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    // Fix: renamed the local (was 'client') so it no longer shadows the class field.
    // NOTE(review): this client is intentionally left open; its lifetime follows the
    // returned proxy for the duration of the test run.
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient,
        encryptionKeyStoreProvider);
    CosmosEncryptionAsyncDatabase freshEncryptionDatabase =
        freshEncryptionClient.getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId));
    // Return directly; the intermediate local added nothing.
    return freshEncryptionDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client; created in before_CosmosItemTest, closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around 'client'.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container whose policy is forced (via reflection) to policyFormatVersion=2,
// used by incompatiblePolicyFormatVersion().
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests (package-private).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory: one test-class instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the async client, wraps it with the encryption client, and
// resolves the shared encryption database/container. Also creates a container whose
// client encryption policy is forced (via reflection) to policyFormatVersion=2 so
// incompatiblePolicyFormatVersion() can exercise the rejection path.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guards against double initialization across factory-created instances.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
// The public API does not allow setting version 2, hence the reflection helper.
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// One-time teardown: closes the async client created in before_CosmosItemTest.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    // Round-trip: create an encrypted item, read it back, verify decryption; then
    // repeat with a 10,000-character sensitive string to cover large payloads.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    properties = getItem(UUID.randomUUID().toString());
    // Fix: build the long string with StringBuilder — repeated String '+=' in a
    // 10,000-iteration loop is O(n^2) in copies.
    StringBuilder longStringBuilder = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longStringBuilder.append('a');
    }
    properties.setSensitiveString(longStringBuilder.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// When contentResponseOnWriteEnabled is false, the create must still be charged
// but must not return the item payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
// No body is echoed back when content response on write is disabled.
assertThat(itemResponse.getItem()).isNull();
}
// Upserts a fresh item (acts as a create), then reads it back and verifies the
// encrypted fields round-trip through decryption.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
// NOTE(review): block() is @Nullable, so this chain can NPE rather than fail an assertion.
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
// Creates an encrypted item, then queries for it by id and verifies the decrypted
// result matches the payload returned by the create call.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
// NOTE(review): blockFirst() is @Nullable — an empty Flux would surface here as an NPE.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
// The first page may contain other matches; only validate the item created above.
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
// Fix: removed the duplicated @Test annotation — TestNG's @Test is not a repeatable
// annotation, so declaring it twice on one method does not compile.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // Create an item, then query it back with parameters bound to deterministic
    // encrypted paths (/sensitiveString, /sensitiveLong) via SqlQuerySpecWithEncryption.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // @nonSensitive is a plain parameter; the two sensitive ones are registered with
    // the encryption-aware spec so their values are encrypted before being sent.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// Filtering on /sensitiveDouble must be rejected client-side with
// IllegalArgumentException: the expected error message states that path uses
// randomized encryption, which cannot be used in query filters.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // FIX: dropped a no-op String.format wrapper that had no format arguments.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = "
        + "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // FIX: removed the unused local that captured the results; only the terminal
        // blockFirst() call matters here, since it is expected to throw.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the "
            + "query because of randomized encryption");
    }
}
// Creates three items, then pages through a query that matches exactly those ids,
// driving the loop off the continuation token and asserting every page holds
// exactly pageSize results.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // FIX: pass pageSize instead of the hard-coded literal 1, so the page-size
        // assertion below stays consistent if pageSize is ever changed.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
    // The target container was created with policyFormatVersion forced to 2 (see
    // before_CosmosItemTest); any CRUD against it should be refused client-side.
    try {
        EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
        encryptionContainerWithIncompatiblePolicyVersion
            .createItem(pojo, new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions())
            .block();
        fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption "
            + "policy fetch because of policy format version greater than 1");
    } catch (UnsupportedOperationException ex) {
        assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this "
            + "container. Please upgrade to the latest version of the same.");
    }
}
// Exercises stale-cache recovery end-to-end: the database/container behind the
// already-constructed "encryptionAsyncContainerOriginal" proxy is deleted and recreated
// several times (once with a different encryption policy), and each subsequent CRUD,
// query, and batch operation is expected to succeed once the client refreshes its
// encryption policy/key caches.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
// Fresh database with client encryption keys "key1"/"key2", and a container using the
// full multi-path encryption policy from getPaths().
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
// Baseline: create, query, and read through the original proxy while caches are fresh.
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Phase 1: drop and recreate the whole DATABASE, then create through the (now stale)
// original proxy — must still succeed.
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
// Phase 2: recreate the CONTAINER with a DIFFERENT policy (only /sensitiveString
// encrypted); a fresh proxy writes the item, a raw read shows only that one field
// encrypted, and the stale original proxy must decrypt correctly after refreshing.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Phase 3: recreate container, then upsert through the stale original proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Phase 4: recreate container, create via a fresh proxy, then replace through the
// stale original proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// NOTE(review): this re-asserts upsertResponse from the previous phase; it was most
// likely meant to check replaceResponse.getRequestCharge() — TODO confirm.
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Phase 5: recreate container, write 10 items via a fresh proxy, then page through a
// full-scan query on the stale original proxy with a continuation token.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
// Phase 6: recreate container, then run a query with encrypted parameters through the
// stale original proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
// Phase 7: recreate container, then run a transactional batch (create + read) through
// the stale original proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
try {
this.client.getDatabase(databaseId).delete().block();
// Best-effort cleanup: deletion failure (e.g. database already gone) is
// deliberately ignored so it cannot mask a real test failure.
} catch(Exception ex) {
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
    // Creating a client encryption key with an unknown algorithm name must be rejected
    // with IllegalArgumentException naming the offending algorithm.
    try {
        TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
        EncryptionKeyWrapMetadata wrapMetadata =
            new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
        this.cosmosEncryptionAsyncDatabase
            .createClientEncryptionKey("key1", "InvalidAlgorithm", wrapMetadata)
            .block();
        fail("client encryption key create should fail on invalid algorithm");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    // Single-partition transactional batch: create, replace, upsert, read, delete the
    // same id, then verify per-operation status codes and decrypted payloads.
    String itemId= UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosEncryptionBatch.createItemOperation(createPojo);
    cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
    cosmosEncryptionBatch.upsertItemOperation(createPojo);
    cosmosEncryptionBatch.readItemOperation(itemId);
    cosmosEncryptionBatch.deleteItemOperation(itemId);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
    int[] expectedStatusCodes = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    assertThat(batchResponse.getResults().size()).isEqualTo(expectedStatusCodes.length);
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    // Same batch scenario as batchExecution(), but every operation is queued through the
    // overloads that accept CosmosBatchItemRequestOptions.
    String itemId= UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatchItemRequestOptions perOperationOptions = new CosmosBatchItemRequestOptions();
    CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosBatch.createItemOperation(createPojo, perOperationOptions);
    cosmosBatch.replaceItemOperation(itemId, replacePojo, perOperationOptions);
    cosmosBatch.upsertItemOperation(createPojo, perOperationOptions);
    cosmosBatch.readItemOperation(itemId, perOperationOptions);
    cosmosBatch.deleteItemOperation(itemId, perOperationOptions);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
    int[] expectedStatusCodes = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    assertThat(batchResponse.getResults().size()).isEqualTo(expectedStatusCodes.length);
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Validation helper for a container whose policy encrypts ONLY /sensitiveString (see
// getPathWithOneEncryptionField and its use in crudQueryStaleCache): when the item is
// read back through the raw, non-decrypting container, sensitiveString is still
// ciphertext and must DIFFER from the original, while every other field round-trips
// unchanged in plain text.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// Deliberately inverted: still-encrypted field must not equal the plaintext original.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
// Builds a policy path list containing a single DETERMINISTIC
// AEAD_AES_256_CBC_HMAC_SHA256 path over /sensitiveString, wrapped with key "key1".
// Returns a mutable list so callers may append further paths.
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath stringPath = new ClientEncryptionIncludedPath();
    stringPath.setClientEncryptionKeyId("key1");
    stringPath.setPath("/sensitiveString");
    stringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    stringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(stringPath);
    return includedPaths;
}
// Creates a container partitioned on /mypk carrying the supplied client-side
// encryption policy; blocks until the service acknowledges creation.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
}
// Creates the database, then registers two client encryption keys ("key1"/"key2"),
// both using AEAD_AES_256_CBC_HMAC_SHA256, wrapped via the test key-store provider.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
    CosmosEncryptionAsyncDatabase encryptionDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
    encryptionDatabase.createClientEncryptionKey("key1",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256,
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1")).block();
    encryptionDatabase.createClientEncryptionKey("key2",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256,
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key2", "tempmetadata2")).block();
}
/**
 * Builds a fresh {@code CosmosEncryptionAsyncContainer} proxy over a brand-new async
 * client, so its encryption caches are independent of the shared test client (used by
 * crudQueryStaleCache to simulate stale caches).
 * NOTE(review): the CosmosAsyncClient built here is never closed — tolerable for a
 * short-lived test helper, but worth confirming this is intentional.
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient client = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(client,
        encryptionKeyStoreProvider);
    CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
        cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(client.getDatabase(databaseId));
    // FIX: return directly; the intermediate local variable added nothing.
    return cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// NOTE(review): the "|" separators in this span look like dataset-row column delimiters
// (comment | method_body | target_code | ... — matching the schema in the file header)
// fused into the code during extraction; this span is not compilable Java as-is.
} |
Added the validation | public void queryItemsAggregate() {
// Original ("before") version of queryItemsAggregate: only checks that each aggregate
// query returns at least one page entry; the revised version below validates the
// aggregate values themselves.
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// NOTE(review): a VALUE max(c._ts) aggregate is deserialized as EncryptionPojo here;
// the revised version uses Integer/String result types instead — presumably the
// intended fix. TODO confirm.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
assertThat(feedResponse1.size()).isGreaterThanOrEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isGreaterThanOrEqualTo(1);
} | } | public void queryItemsAggregate() {
// Revised ("after") version of queryItemsAggregate (its opening line sits at the end of
// the previous, delimiter-fused line): validates the aggregate VALUES, not just page
// sizes — max(c._ts) must fall within [startTime, endTime] and each VALUE aggregate
// must return exactly one result.
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// max(c._ts) deserialized as Integer: _ts is an epoch-seconds server timestamp.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
// Added in the revision: max over an encrypted string path, deserialized as String.
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
// NOTE(review): "} | class ..." below is dataset-row glue fusing this method's closing
// brace with the start of the context_before column (the class declaration).
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client; created in before_CosmosItemTest, closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around "client".
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container whose policy format version is forced to 2 (see before_CosmosItemTest) to
// exercise the incompatible-version failure path.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests (package-private for reuse).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one test instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the raw and encryption-aware clients, binds the shared
// encryption database/container, and additionally creates a container whose client
// encryption policy is forced (via reflection) to format version 2, for the
// incompatiblePolicyFormatVersion test.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guards against double-initialization across factory instances.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Policy format version 2 is not settable through the public API, hence reflection.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Teardown: closes the raw async client built in before_CosmosItemTest. alwaysRun
// ensures the client is released even when setup or a test failed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
// Round-trip: create (encrypts sensitive fields on the wire) then read (decrypts) and
// compare every field; repeated with a 10,000-character sensitive string to exercise
// large-payload encryption.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    properties = getItem(UUID.randomUUID().toString());
    // FIX: StringBuilder replaces repeated String concatenation in a loop, which is
    // O(n^2) in the string length; the resulting value is identical.
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    // With content-response-on-write disabled, the write still succeeds (non-zero
    // request charge) but the response carries no item body.
    CosmosItemRequestOptions writeOptions = new CosmosItemRequestOptions();
    writeOptions.setContentResponseOnWriteEnabled(false);
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), writeOptions).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(createResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    // Upsert a new item, then read it back and verify decryption restores every field.
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    PartitionKey partitionKey = new PartitionKey(pojo.getMypk());
    CosmosItemResponse<EncryptionPojo> upsertResponse = cosmosEncryptionAsyncContainer.upsertItem(pojo,
        partitionKey, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateResponse(pojo, upsertResponse.getItem());
    EncryptionPojo roundTripped = cosmosEncryptionAsyncContainer.readItem(pojo.getId(), partitionKey,
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(pojo, roundTripped);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
    // Create an item, then query it back by id and verify the decrypted result.
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo createdItem = createResponse.getItem();
    validateResponse(pojo, createdItem);
    SqlQuerySpec querySpec =
        new SqlQuerySpec(String.format("SELECT * from c where c.id = '%s'", pojo.getId()));
    CosmosPagedFlux<EncryptionPojo> pagedFlux =
        cosmosEncryptionAsyncContainer.queryItems(querySpec, new CosmosQueryRequestOptions(), EncryptionPojo.class);
    List<EncryptionPojo> results = pagedFlux.byPage().blockFirst().getResults();
    assertThat(results.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo result : results) {
        if (result.getId().equals(pojo.getId())) {
            validateResponse(result, createdItem);
        }
    }
}
// FIX: removed a duplicated @Test annotation — @Test is not a repeatable annotation,
// so declaring it twice on the same method is a compile error.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // Seed one item so the parameterized query below has a guaranteed match.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Filter on two encrypted paths plus one plain field; the encrypted parameters must
    // be registered on SqlQuerySpecWithEncryption so the client encrypts them before the
    // query is sent. (FIX: dropped a no-op String.format wrapper that had no format args.)
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = "
        + "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * Verifies that a query with an equality predicate on a RANDOMIZED-encrypted property
 * ({@code /sensitiveDouble}) is rejected client-side with IllegalArgumentException:
 * randomized ciphertexts are non-deterministic, so equality filters cannot be served.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // String.format with no format arguments was a no-op; the literal is used directly.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // The unused local was dropped; evaluating the first page triggers the failure.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
/**
 * Drains a three-document query one page at a time using continuation tokens and a
 * page size of one, asserting that every page is exactly full and that all documents
 * are eventually returned.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Create three documents and remember their ids (triplicated block collapsed to a loop).
    List<String> actualIds = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        EncryptionPojo properties = getItem(UUID.randomUUID().toString());
        cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
            new CosmosItemRequestOptions()).block();
        actualIds.add(properties.getId());
    }
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Was byPage(continuationToken, 1) — the hard-coded literal duplicated the
        // declared pageSize variable; use the variable so the loop stays consistent.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// The fixture container was created with policyFormatVersion 2, which this client
// library does not support, so any CRUD call must surface UnsupportedOperationException.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
    try {
        EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
        encryptionContainerWithIncompatiblePolicyVersion
            .createItem(pojo, new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions())
            .block();
        fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
            "policy " +
            "fetch because of policy format version greater than 1");
    } catch (UnsupportedOperationException ex) {
        assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
            "container. Please upgrade to the latest version of the same.");
    }
}
// Exercises every CRUD, query, and batch path against a container proxy whose backing
// database/container is repeatedly dropped and recreated out-of-band, to prove that the
// stale encryption-settings / client-encryption-policy caches held by the original proxy
// recover transparently. The statement order is the test: each delete/recreate pair must
// immediately precede the operation that hits the stale cache.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
// Fresh database + encryption keys, plus a dedicated client so caches are isolated
// from the shared fixtures.
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
// "Original" proxy: all later operations go through this instance so its cached
// encryption settings become stale after each delete/recreate below.
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
// Baseline: create, query and read succeed while the cache is still fresh.
// NOTE(review): .block() is @Nullable; results are dereferenced without null checks,
// which is tolerated in tests (an NPE fails the test) but SpotBugs may flag it.
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Drop and recreate the WHOLE database, then create through the stale proxy.
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
// Recreate the container with a DIFFERENT policy (only /sensitiveString encrypted);
// a fresh proxy writes with the new policy while the stale proxy must still read it.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// Raw (non-decrypting) read: only /sensitiveString should look encrypted.
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Stale-cache upsert after another container recreate.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Stale-cache replace: the document is seeded via a fresh proxy, replaced via the stale one.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// NOTE(review): this asserts on upsertResponse, not replaceResponse — looks like a
// copy/paste slip; confirm the intended charge check before changing behavior.
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Stale-cache paginated query over 10 documents seeded via a fresh proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
// Stale-cache queryItemsOnEncryptedProperties after another recreate.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
// Stale-cache transactional batch (create + read of the same id).
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
// Best-effort cleanup; the database may already be gone, so failures are ignored.
try {
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
}
}
}
// Creating a client encryption key with an unknown algorithm name must be rejected
// client-side with an IllegalArgumentException before any service call succeeds.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
    try {
        TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
        EncryptionKeyWrapMetadata wrapMetadata = new EncryptionKeyWrapMetadata(
            keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
        this.cosmosEncryptionAsyncDatabase
            .createClientEncryptionKey("key1", "InvalidAlgorithm", wrapMetadata)
            .block();
        fail("client encryption key create should fail on invalid algorithm");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
    }
}
// Runs create/replace/upsert/read/delete for the same id in a single transactional
// batch and verifies each operation's status code and decrypted payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createPojo);
    batch.replaceItemOperation(itemId, replacePojo);
    batch.upsertItemOperation(createPojo);
    batch.readItemOperation(itemId);
    batch.deleteItemOperation(itemId);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    int[] expectedStatusCodes = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // The delete result (index 4) carries no body, so only the first four are decoded.
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Same batch flow as batchExecution, but passing an explicit per-operation
// CosmosBatchItemRequestOptions to every operation overload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatchItemRequestOptions perOperationOptions = new CosmosBatchItemRequestOptions();
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createPojo, perOperationOptions);
    batch.replaceItemOperation(itemId, replacePojo, perOperationOptions);
    batch.upsertItemOperation(createPojo, perOperationOptions);
    batch.readItemOperation(itemId, perOperationOptions);
    batch.deleteItemOperation(itemId, perOperationOptions);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    int[] expectedStatusCodes = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // The delete result (index 4) carries no body, so only the first four are decoded.
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts the one-field-encryption policy outcome: /sensitiveString must NOT round-trip
// (it stays encrypted in the raw read), while the id and every other field — including
// the other "sensitive*" fields, which that policy leaves in plaintext — must be equal.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The only encrypted path: ciphertext must differ from the plaintext.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
// Builds a client-encryption policy path list that encrypts only /sensitiveString,
// deterministically, using key "key1". Returns a mutable list so callers may extend it.
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
    paths.add(sensitiveStringPath);
    return paths;
}
// Creates a container (partition key "/mypk") in the given database with the supplied
// client-side encryption policy attached, blocking until creation completes.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase
        .getCosmosAsyncDatabase()
        .createContainer(containerProperties)
        .block();
}
// Creates a database with the given id and provisions the two client encryption keys
// ("key1"/"key2") that the test containers' encryption policies reference.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
    cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
    CosmosEncryptionAsyncDatabase encryptionDatabase =
        cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    String providerName = keyStoreProvider.getProviderName();
    encryptionDatabase.createClientEncryptionKey("key1",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256,
        new EncryptionKeyWrapMetadata(providerName, "key1", "tempmetadata1")).block();
    encryptionDatabase.createClientEncryptionKey("key2",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256,
        new EncryptionKeyWrapMetadata(providerName, "key2", "tempmetadata2")).block();
}
// Builds a brand-new client -> encryption client -> database -> container proxy chain so
// the returned container carries none of the caches accumulated by existing fixtures.
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client backing all encryption wrappers; created in before_CosmosItemTest.
private CosmosAsyncClient client;
// Encryption-aware wrapper around the raw client.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container provisioned with policyFormatVersion 2 to exercise the incompatibility path.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container used by most tests (package-private for helpers).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
// Shared encryption database that owns the test containers and encryption keys.
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one instance of this test class is created per client
// builder supplied by the "clientBuilders" data provider.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time fixture setup: builds the shared encryption client/database/container, and
// additionally provisions a container whose client-encryption-policy format version (2)
// is newer than this library supports, for the incompatibility test.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
    assertThat(this.client).isNull();
    this.client = getClientBuilder().buildAsyncClient();
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
        keyStoreProvider);
    cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
    cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
    // Force an unsupported (v2) policy format version onto a fresh container via reflection.
    ClientEncryptionPolicy v2Policy = new ClientEncryptionPolicy(getPaths());
    ReflectionUtils.setPolicyFormatVersion(v2Policy, 2);
    String incompatibleContainerId = UUID.randomUUID().toString();
    CosmosContainerProperties containerProperties =
        new CosmosContainerProperties(incompatibleContainerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(v2Policy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
    encryptionContainerWithIncompatiblePolicyVersion =
        cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(incompatibleContainerId);
}
// Tears down the shared client once the whole class has run; alwaysRun ensures the
// cleanup executes even when setup or individual tests failed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
/**
 * Creates an item and reads it back, validating the full encrypt/decrypt round trip;
 * then repeats the create with a 10,000-character sensitive string to cover large
 * encrypted payloads.
 */
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    // Large-value case: build the 10,000-char string with StringBuilder — the previous
    // `longString += "a"` loop re-allocated the whole string each iteration (O(n^2)).
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// With contentResponseOnWriteEnabled=false the create response must not echo the
// document back, so the response item is expected to be null.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    CosmosItemRequestOptions noContentOptions = new CosmosItemRequestOptions();
    noContentOptions.setContentResponseOnWriteEnabled(false);
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer
        .createItem(pojo, new PartitionKey(pojo.getMypk()), noContentOptions)
        .block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(createResponse.getItem()).isNull();
}
// Upsert (insert path) followed by a point read; both round trips must decrypt to the
// original plaintext values.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    EncryptionPojo expected = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> response = cosmosEncryptionAsyncContainer.upsertItem(
        expected, new PartitionKey(expected.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(response.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo upserted = response.getItem();
    validateResponse(expected, upserted);
    EncryptionPojo roundTripped = cosmosEncryptionAsyncContainer
        .readItem(expected.getId(), new PartitionKey(expected.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class)
        .block()
        .getItem();
    validateResponse(expected, roundTripped);
}
// Creates a document, then queries it back by id and validates that each matching
// result decrypts to the originally created payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
// NOTE(review): .block() is annotated @Nullable; itemResponse is dereferenced without
// a null check. Acceptable in a test (an NPE fails it), but SpotBugs may flag it.
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
// Only the first page is needed — the query filters on a single id.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
/**
 * Queries on deterministically encrypted properties via
 * {@code queryItemsOnEncryptedProperties} and validates that matching documents
 * decrypt back to the original plaintext.
 *
 * NOTE(review): the method previously carried two identical {@code @Test} annotations;
 * TestNG's {@code @Test} is not {@code @Repeatable}, so the duplicate did not compile
 * and has been removed.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // String.format with no format arguments was a no-op; the literal is used directly.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * Verifies that a query with an equality predicate on a RANDOMIZED-encrypted property
 * ({@code /sensitiveDouble}) is rejected client-side with IllegalArgumentException:
 * randomized ciphertexts are non-deterministic, so equality filters cannot be served.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // String.format with no format arguments was a no-op; the literal is used directly.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // The unused local was dropped; evaluating the first page triggers the failure.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
// Creates three items, then pages through them one at a time via continuation tokens.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    List<String> actualIds = new ArrayList<>();
    // Seed three items (loop replaces the original copy-pasted triplicate).
    for (int i = 0; i < 3; i++) {
        EncryptionPojo properties = getItem(UUID.randomUUID().toString());
        cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
            new CosmosItemRequestOptions()).block();
        actualIds.add(properties.getId());
    }
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // BUGFIX: pass the declared pageSize instead of a hard-coded 1 so the requested
        // page size stays in sync with the per-page assertion below.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    // Every seeded document must have been returned exactly once across the pages.
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// The container referenced here was created (in setup) with policyFormatVersion forced to
// 2; this client only supports version 1, so any CRUD call must fail while fetching the
// encryption policy. Temporarily ignored: the service currently reports version 0.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
    try {
        EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
        encryptionContainerWithIncompatiblePolicyVersion.createItem(pojo,
            new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
            "policy " +
            "fetch because of policy format version greater than 1");
    } catch (UnsupportedOperationException ex) {
        assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
            "container. Please upgrade to the latest version of the same.");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
// Verifies that a container proxy whose cached client-encryption settings have gone stale
// (because the database/container was dropped and recreated behind it) still performs
// every operation correctly: create, read, upsert, replace, paged query, query on
// encrypted properties, and transactional batch.
String databaseId = UUID.randomUUID().toString();
try {
// Fresh database with encryption keys, plus a brand-new client/proxy pair for it.
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
// Baseline create/query/read through the original proxy, which warms its cache.
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Drop and recreate the whole database: the original proxy's cache is now stale, yet
// the next create through it must still succeed.
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
// Recreate the container with a DIFFERENT policy (only /sensitiveString encrypted); a
// fresh proxy writes under the new policy, then the raw container read shows exactly
// one field encrypted, while the stale original proxy still decrypts correctly.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Stale-cache upsert after container recreation.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Stale-cache replace after container recreation (item seeded via a fresh proxy).
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// NOTE(review): this re-asserts upsertResponse's charge; presumably
// replaceResponse.getRequestCharge() was intended — confirm.
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Stale-cache paged query: 10 docs written via a fresh proxy, read back in pages of 3
// through the original proxy using continuation tokens.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
// Stale-cache query on encrypted properties.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
// Stale-cache transactional batch (create + read of the same item).
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
// Best-effort cleanup of the throwaway database; failures are deliberately ignored.
try {
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
}
}
}
// Creating a client encryption key with an unknown algorithm name must be rejected with
// IllegalArgumentException before any service call succeeds.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
    try {
        TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
        EncryptionKeyWrapMetadata keyWrapMetadata =
            new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1",
                "tempmetadata1");
        this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
            "InvalidAlgorithm", keyWrapMetadata).block();
        fail("client encryption key create should fail on invalid algorithm");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
    }
}
// Runs a 5-operation transactional batch (create/replace/upsert/read/delete) against the
// encrypted container and verifies each operation's status code and decrypted payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String itemId= UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");

    CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosEncryptionBatch.createItemOperation(createPojo);
    cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
    cosmosEncryptionBatch.upsertItemOperation(createPojo);
    cosmosEncryptionBatch.readItemOperation(itemId);
    cosmosEncryptionBatch.deleteItemOperation(itemId);

    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    int[] expectedStatusCodes = new int[] {
        HttpResponseStatus.CREATED.code(),   // create
        HttpResponseStatus.OK.code(),        // replace
        HttpResponseStatus.OK.code(),        // upsert (item exists -> replace path)
        HttpResponseStatus.OK.code(),        // read
        HttpResponseStatus.NO_CONTENT.code() // delete
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // The delete (index 4) carries no body; the other results round-trip their payloads.
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Same 5-operation batch scenario as batchExecution, but exercising the overloads that
// accept a CosmosBatchItemRequestOptions per operation.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String itemId= UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");

    CosmosBatchItemRequestOptions perOperationOptions = new CosmosBatchItemRequestOptions();
    CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosBatch.createItemOperation(createPojo, perOperationOptions);
    cosmosBatch.replaceItemOperation(itemId, replacePojo, perOperationOptions);
    cosmosBatch.upsertItemOperation(createPojo, perOperationOptions);
    cosmosBatch.readItemOperation(itemId, perOperationOptions);
    cosmosBatch.deleteItemOperation(itemId, perOperationOptions);

    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    int[] expectedStatusCodes = new int[] {
        HttpResponseStatus.CREATED.code(),   // create
        HttpResponseStatus.OK.code(),        // replace
        HttpResponseStatus.OK.code(),        // upsert
        HttpResponseStatus.OK.code(),        // read
        HttpResponseStatus.NO_CONTENT.code() // delete
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(batchResponse.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts that only the /sensitiveString path was encrypted on a raw (non-decrypting)
// read: it must differ from the original plaintext, while the id and every other
// property round-trip unchanged.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The lone encrypted path — stored ciphertext must not equal the plaintext.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
// Builds a single-path client-encryption policy: deterministic
// AEAD_AES_256_CBC_HMAC_SHA256 over /sensitiveString, wrapped by the key named "key1".
// Returns a mutable list so callers may append further paths.
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
    paths.add(sensitiveStringPath);
    return paths;
}
// Creates a container with partition key /mypk carrying the supplied client-encryption
// policy, blocking until the service acknowledges the creation.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
}
// Creates a fresh database and provisions two client encryption keys ("key1"/"key2"),
// both using AEAD_AES_256_CBC_HMAC_SHA256 and wrapped via the test key-store provider.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
    CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
    encryptionAsyncDatabase.createClientEncryptionKey("key1",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256,
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1")).block();
    encryptionAsyncDatabase.createClientEncryptionKey("key2",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256,
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key2", "tempmetadata2")).block();
}
// Builds a brand-new client + encryption-client pair and returns a fresh container proxy.
// Because the clients are new, the proxy starts with an empty encryption-settings cache —
// exactly what the stale-cache tests rely on.
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
}
codestyle: please use assertJ assertion, not testng to be consistent with the rest of the tests. | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
long endTime = Instant.now().getEpochSecond();
int timeStamp = 0;
for (Object pojo: feedResponse1) {
timeStamp = Integer.parseInt(pojo.toString());
}
Assert.assertTrue(timeStamp > startTime);
Assert.assertTrue(timeStamp <= endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
} | Assert.assertTrue(timeStamp > startTime); | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Plain async client; created in before_CosmosItemTest and closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around `client`.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container created (in setup) with policyFormatVersion forced to 2, used to exercise
// the incompatible-policy-version error path.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encrypted container/database used by most tests (package-private for reuse).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: the suite instantiates one test instance per configured
// client builder supplied by the "clientBuilders" data provider.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the async client and its encryption wrapper, resolves the shared
// encrypted database/container, and additionally creates a container whose encryption
// policy is forced (via reflection) to the unsupported policyFormatVersion 2 for the
// incompatiblePolicyFormatVersion test.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guards against double setup: the client must not exist yet.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Build a policy the client cannot support (format version 2) — set via reflection
// because the public API does not allow it.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Suite teardown: the client must have been created by setup before it can be closed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
// Round-trip: create through the encryption container, then read back and verify every
// sensitive field decrypts to the original plaintext; repeated with a large payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);

    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);

    // Large-payload case: a 10,000 character sensitive string.
    // PERF: build with StringBuilder instead of String += in a loop (O(n) vs O(n^2)).
    StringBuilder longStringBuilder = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longStringBuilder.append('a');
    }
    properties = getItem(UUID.randomUUID().toString());
    properties.setSensitiveString(longStringBuilder.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// With contentResponseOnWriteEnabled=false the write response omits the resource body,
// so getItem() must be null while the operation is still charged.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    CosmosItemRequestOptions noContentOnWriteOptions = new CosmosItemRequestOptions();
    noContentOnWriteOptions.setContentResponseOnWriteEnabled(false);
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), noContentOnWriteOptions).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(createResponse.getItem()).isNull();
}
// Upsert (insert path, item does not exist yet) followed by a point read; both the write
// response and the read must decrypt back to the original plaintext.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> upsertResponse = cosmosEncryptionAsyncContainer.upsertItem(pojo,
        new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateResponse(pojo, upsertResponse.getItem());

    EncryptionPojo readBack = cosmosEncryptionAsyncContainer.readItem(pojo.getId(), new PartitionKey(pojo.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(pojo, readBack);
}
// Creates one item, queries it back by id, and checks that the query path decrypts the
// result to the same payload the create returned.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo createdItem = createResponse.getItem();
    validateResponse(pojo, createdItem);

    SqlQuerySpec querySpec = new SqlQuerySpec(
        String.format("SELECT * from c where c.id = '%s'", pojo.getId()));
    CosmosPagedFlux<EncryptionPojo> pagedFlux =
        cosmosEncryptionAsyncContainer.queryItems(querySpec, new CosmosQueryRequestOptions(), EncryptionPojo.class);
    List<EncryptionPojo> firstPage = pagedFlux.byPage().blockFirst().getResults();
    assertThat(firstPage.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo result : firstPage) {
        if (result.getId().equals(pojo.getId())) {
            validateResponse(result, createdItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // NOTE: the duplicated @Test annotation was removed — @Test is not @Repeatable,
    // so annotating the method twice does not compile.
    // Queries on deterministically encrypted properties: parameters bound via
    // SqlQuerySpecWithEncryption are encrypted client-side before the query is sent.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal; the previous String.format had no format arguments and was a no-op.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    FeedResponse<EncryptionPojo> firstPage = feedResponseIterator.byPage().blockFirst();
    assertThat(firstPage).isNotNull(); // Flux.blockFirst() is @Nullable
    List<EncryptionPojo> feedResponse = firstPage.getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    // /sensitiveDouble is configured with RANDOMIZED encryption, which is not queryable;
    // binding a parameter to it must be rejected client-side with IllegalArgumentException.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal; the previous String.format had no format arguments and was a no-op.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Subscribing triggers the client-side policy check; the page itself is never used
        // (the previous code assigned it to an unused local).
        feedResponseIterator.byPage().blockFirst();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Creates three items, then drains a query one page at a time via continuation
    // tokens, asserting each page honors the requested page size.
    List<String> actualIds = new ArrayList<>();
    for (int i = 0; i < 3; i++) { // was three copy-pasted create blocks
        EncryptionPojo properties = getItem(UUID.randomUUID().toString());
        cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
            new CosmosItemRequestOptions()).block();
        actualIds.add(properties.getId());
    }
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Use the pageSize variable rather than the previous hard-coded literal 1, so the
        // assertion below and the requested page size cannot drift apart.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            assertThat(fr.getResults().size()).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
// Negative test: a container whose client encryption policy carries a format version
// greater than 1 (forced via reflection in before_CosmosItemTest) must be rejected
// with UnsupportedOperationException on first CRUD use.
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
// Stale-cache scenario: a container proxy is created, then the database/container/policy
// underneath it are deleted and recreated repeatedly. Each CRUD, query, and batch
// operation is then re-issued through the ORIGINAL (stale) proxy and must still succeed,
// proving the client refreshes its encryption caches transparently.
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
// Phase 1: fresh database + keys + container; baseline create/query/read through
// a dedicated client so its caches are independent of the shared test client.
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Phase 2: drop and recreate the whole database, then create through the stale proxy.
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
// Phase 3: recreate the container with a DIFFERENT policy (only /sensitiveString
// encrypted); a new proxy writes the item, a plain (non-encryption) read shows only
// that one field stays ciphered, and the stale proxy must still decrypt correctly.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Phase 4: recreate container; upsert through the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Phase 5: recreate container; a fresh proxy creates the item, the stale proxy replaces it.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// NOTE(review): this re-asserts upsertResponse from phase 4; replaceResponse.getRequestCharge()
// was presumably intended — confirm and fix separately.
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Phase 6: recreate container; a fresh proxy writes 10 items, then the stale proxy
// pages through a full query via continuation tokens.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
// Phase 7: recreate container; query on encrypted properties through the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
// Phase 8: recreate container; transactional batch (create + read) through the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
try {
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
// Best-effort cleanup of the test database; deletion failures are deliberately ignored
// so they do not mask the real test outcome.
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
// Negative test: creating a client encryption key with an unrecognized algorithm name
// must fail with IllegalArgumentException (validated client-side).
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    // Runs create/replace/upsert/read/delete for one id inside a single transactional
    // batch and verifies each operation's status code and (where present) payload.
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosEncryptionBatch.createItemOperation(createPojo);
    cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
    cosmosEncryptionBatch.upsertItemOperation(createPojo);
    cosmosEncryptionBatch.readItemOperation(itemId);
    cosmosEncryptionBatch.deleteItemOperation(itemId);
    CosmosBatchResponse batchResponse =
        this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
    assertThat(batchResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    // Same scenario as batchExecution, but every operation is added through the
    // CosmosBatchItemRequestOptions overloads to cover that API surface.
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
    cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
    cosmosBatch.replaceItemOperation(itemId, replacePojo, cosmosBatchItemRequestOptions);
    cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
    cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
    cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
    CosmosBatchResponse batchResponse =
        this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
    assertThat(batchResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts the result of reading an item from a container whose policy encrypts ONLY
// /sensitiveString: that field must NOT equal the plaintext (it was read without
// decryption), while every other field must round-trip unchanged.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// Still ciphered — must differ from the original plaintext.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    // Policy fragment with a single deterministic-encryption path over /sensitiveString,
    // wrapped by the client encryption key "key1".
    List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    paths.add(sensitiveStringPath);
    return paths;
}
// Creates a container (partition key /mypk) with the given client encryption policy
// in the given database, blocking until the service call completes.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
// Creates a fresh database plus two client encryption keys ("key1", "key2"), both using
// AEAD_AES_256_CBC_HMAC_SHA256 and wrapped via TestEncryptionKeyStoreProvider.
// Blocks until all three resources exist.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    // Builds a brand-new client + encryption-client pair so the returned container proxy
    // carries none of the caches held by the shared test client.
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
}

class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client; created in before_CosmosItemTest, closed in afterClass.
private CosmosAsyncClient client;
// Encryption wrapper around the raw client.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container whose policy format version is forced to 2 (unsupported) for the negative test.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests (package-private).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one test instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
// One-time setup: builds the async client and its encryption wrapper, resolves the shared
// encryption database/container, and additionally creates a container whose policy format
// version is forced to 2 via reflection for incompatiblePolicyFormatVersion.
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
// Force an unsupported policy format version (versions > 1 are rejected by this library;
// see incompatiblePolicyFormatVersion).
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
// Tears down the client created in before_CosmosItemTest.
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    // Round-trip: create (encrypt on write) then read (decrypt on read) and compare.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    CosmosItemResponse<EncryptionPojo> readResponse = cosmosEncryptionAsyncContainer.readItem(properties.getId(),
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).block();
    assertThat(readResponse).isNotNull();
    validateResponse(properties, readResponse.getItem());

    // Large-payload variant: a 10k-character sensitive string must also round-trip.
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a'); // StringBuilder avoids the previous O(n^2) String concatenation
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    // With contentResponseOnWriteEnabled=false the service must not echo the item body back,
    // so the response carries a charge but a null item.
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    requestOptions.setContentResponseOnWriteEnabled(false);
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), requestOptions).block();
    // Mono.block() is @Nullable; assert explicitly so a missing response fails clearly.
    assertThat(itemResponse).isNotNull();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(itemResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    // Upserts a new item (encrypt on write) and reads it back (decrypt on read),
    // verifying the round-trip against the original plaintext pojo.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    // Mono.block() is @Nullable; fail with a clear assertion instead of an opaque NPE.
    assertThat(itemResponse).isNotNull();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    CosmosItemResponse<EncryptionPojo> readResponse = cosmosEncryptionAsyncContainer.readItem(properties.getId(),
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions(), EncryptionPojo.class).block();
    assertThat(readResponse).isNotNull();
    validateResponse(properties, readResponse.getItem());
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
    // Creates an item and verifies it can be found (and decrypted) via a SQL query on id.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
    FeedResponse<EncryptionPojo> firstPage = feedResponseIterator.byPage().blockFirst();
    assertThat(firstPage).isNotNull(); // Flux.blockFirst() is @Nullable
    List<EncryptionPojo> feedResponse = firstPage.getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // NOTE: the duplicated @Test annotation was removed — @Test is not @Repeatable,
    // so annotating the method twice does not compile.
    // Queries on deterministically encrypted properties: parameters bound via
    // SqlQuerySpecWithEncryption are encrypted client-side before the query is sent.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal; the previous String.format had no format arguments and was a no-op.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    FeedResponse<EncryptionPojo> firstPage = feedResponseIterator.byPage().blockFirst();
    assertThat(firstPage).isNotNull(); // Flux.blockFirst() is @Nullable
    List<EncryptionPojo> feedResponse = firstPage.getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    // /sensitiveDouble is configured with RANDOMIZED encryption, which is not queryable;
    // binding a parameter to it must be rejected client-side with IllegalArgumentException.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse).isNotNull(); // Mono.block() is @Nullable
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal; the previous String.format had no format arguments and was a no-op.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Subscribing triggers the client-side policy check; the page itself is never used
        // (the previous code assigned it to an unused local).
        feedResponseIterator.byPage().blockFirst();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
actualIds.get(1), actualIds.get(2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, 1).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
try {
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath();
includedPath.setClientEncryptionKeyId("key1");
includedPath.setPath("/sensitiveString");
includedPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
paths.add(includedPath);
return paths;
}
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
CosmosAsyncClient client = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(client,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(client.getDatabase(databaseId));
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer = cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
return cosmosEncryptionAsyncContainer;
}
} |
ditto | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
long endTime = Instant.now().getEpochSecond();
int timeStamp = 0;
for (Object pojo: feedResponse1) {
timeStamp = Integer.parseInt(pojo.toString());
}
Assert.assertTrue(timeStamp > startTime);
Assert.assertTrue(timeStamp <= endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
} | Assert.assertTrue(timeStamp <= endTime); | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
properties = getItem(UUID.randomUUID().toString());
String longString = "";
for (int i = 0; i < 10000; i++) {
longString += "a";
}
properties.setSensitiveString(longString);
itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
/**
 * Queries on encrypted properties via {@code queryItemsOnEncryptedProperties}: the
 * parameters bound to encrypted paths (/sensitiveString, /sensitiveLong) are registered
 * on a {@code SqlQuerySpecWithEncryption} so the SDK can encrypt them before the query
 * is issued; results must come back decrypted.
 *
 * Fix(review): the original carried a duplicated {@code @Test} annotation on this
 * method; {@code @Test} is not repeatable, so the duplicate was removed.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // @nonSensitive targets a plaintext path, so it is added directly to the spec.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Encrypted-path parameters go through addEncryptionParameter instead.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// A query parameter bound to a path using randomized encryption (/sensitiveDouble, per
// the asserted error message) must be rejected: randomized ciphertexts are not
// deterministic, so equality filters on them cannot be served.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
SqlQuerySpec querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
cosmosQueryRequestOptions, EncryptionPojo.class);
try {
// blockFirst() triggers execution; the failure is expected before any result arrives.
// NOTE(review): feedResponse is intentionally unused — reaching fail() is the point.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
fail("Query on randomized parameter should fail");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
"query because of randomized encryption");
}
}
// Drains a 3-item query one page at a time using continuation tokens with page size 1,
// verifying each page holds exactly one result and the total matches the item count.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
List<String> actualIds = new ArrayList<>();
// Seed three items, remembering their ids for the IN-clause below.
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
actualIds.get(1), actualIds.get(2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
// Resume from the last continuation token until the service reports no more pages.
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, 1).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// A CRUD call against a container whose encryption policy uses a format version newer
// than this library supports (version 2, set up in before_CosmosItemTest) must fail
// with an UnsupportedOperationException during the policy fetch.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
/**
 * End-to-end verification that a container proxy holding cached encryption metadata
 * stays usable (or is refreshed) after the backing database/container is dropped and
 * re-created — including re-creation with a different encryption policy. Exercises
 * create, read, upsert, replace, paged query, query-on-encrypted-properties and
 * transactional batch against the stale proxy.
 *
 * Fix(review): after the replace operation the original re-asserted
 * {@code upsertResponse.getRequestCharge()}; it now asserts the charge of the
 * {@code replaceResponse} actually being validated.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    String databaseId = UUID.randomUUID().toString();
    try {
        // Fresh database + encryption keys, and a container proxy whose caches we will
        // deliberately make stale below.
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);

        // Baseline: create, query and point-read through the original proxy.
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Drop and re-create the *database*; the stale proxy must still create items.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());

        // Re-create the container with a policy that encrypts only one field; a new
        // proxy writes under that policy while the stale proxy must still read correctly.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // Reading through the raw (non-encryption) container shows only one field encrypted.
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Upsert through the stale proxy after another container re-creation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Replace through the stale proxy after yet another re-creation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // Fixed: assert the replace response's charge (the original re-checked upsertResponse).
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Paged query through the stale proxy over 10 freshly written items.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);

        // Query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong");
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }

        // Transactional batch (create + read) through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId= UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        // Best-effort cleanup of the per-test database.
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch(Exception ex) {
        }
    }
}
// Creating a client encryption key with an unsupported algorithm name must be rejected
// client-side with an IllegalArgumentException naming the bad algorithm.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
// Transactional batch of create/replace/upsert/read/delete against one partition key:
// each operation's status code is checked, and every item-bearing result must decrypt
// to the expected payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
// Upsert after replace restores the original payload; then read it back and delete.
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// The delete result (index 4) carries no item, so only results 0-3 are validated.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Same batch scenario as batchExecution, but every operation is submitted through the
// per-operation CosmosBatchItemRequestOptions overloads.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// Delete (index 4) returns no body; validate the item-bearing results only.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
/**
 * Asserts that {@code result} matches {@code originalItem} on every field EXCEPT
 * sensitiveString, which must differ — i.e. the item was stored with only
 * /sensitiveString encrypted and read back without decryption.
 */
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
    EncryptionPojo expected = originalItem;
    EncryptionPojo actual = result;
    // Identity and plaintext fields round-trip unchanged.
    assertThat(actual.getId()).isEqualTo(expected.getId());
    assertThat(actual.getNonSensitive()).isEqualTo(expected.getNonSensitive());
    // The one encrypted field must NOT equal its plaintext.
    assertThat(actual.getSensitiveString()).isNotEqualTo(expected.getSensitiveString());
    // Everything else was left unencrypted under this policy.
    assertThat(actual.getSensitiveInt()).isEqualTo(expected.getSensitiveInt());
    assertThat(actual.getSensitiveFloat()).isEqualTo(expected.getSensitiveFloat());
    assertThat(actual.getSensitiveLong()).isEqualTo(expected.getSensitiveLong());
    assertThat(actual.getSensitiveDouble()).isEqualTo(expected.getSensitiveDouble());
    assertThat(actual.isSensitiveBoolean()).isEqualTo(expected.isSensitiveBoolean());
    assertThat(actual.getSensitiveIntArray()).isEqualTo(expected.getSensitiveIntArray());
    assertThat(actual.getSensitiveStringArray()).isEqualTo(expected.getSensitiveStringArray());
    assertThat(actual.getSensitiveString3DArray()).isEqualTo(expected.getSensitiveString3DArray());
}
/**
 * Builds a client encryption policy path list that encrypts exactly one field,
 * /sensitiveString, deterministically under "key1".
 *
 * @return a mutable list containing the single included path
 */
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    // DETERMINISTIC so the path remains usable in equality queries.
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
/**
 * Creates a container (partition key /mypk) in the given database with the supplied
 * client encryption policy attached, blocking until creation completes.
 */
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase()
        .createContainer(containerProperties)
        .block();
}
/**
 * Creates a database with the given id and provisions two client encryption keys
 * ("key1", "key2") wrapped via the test key-store provider, blocking on each step.
 */
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    EncryptionKeyWrapMetadata key1Metadata =
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
    EncryptionKeyWrapMetadata key2Metadata =
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key2", "tempmetadata2");
    cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
    CosmosEncryptionAsyncDatabase database =
        cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
    database.createClientEncryptionKey("key1",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, key1Metadata).block();
    database.createClientEncryptionKey("key2",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, key2Metadata).block();
}
/**
 * Builds a brand-new encryption container proxy (fresh client, fresh caches) for the
 * given database/container ids — used to contrast against proxies with stale caches.
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    // Resolve database then container through the new client and return directly.
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
}
class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client; built in before_CosmosItemTest and closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper created around {@link #client}.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container created with policyFormatVersion forced to 2, to exercise the
// incompatible-policy-version failure path.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container and database used by most tests in this class.
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one test-class instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// Class-level setup: builds the async client and its encryption wrapper, resolves the
// shared encryption database/container, and creates an extra container whose policy
// format version is forced to 2 (via reflection) for the incompatibility test.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guard against double initialization across factory instances.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Force an unsupported policy format version (2) that the public API cannot set.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
/**
 * Class-level teardown: the shared async client created in before_CosmosItemTest must
 * exist and is released here. Runs even if setup partially failed (alwaysRun = true).
 */
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    assertThat(client).isNotNull();
    client.close();
}
/**
 * Create-then-read round trip: an item written through the encryption container must
 * decrypt back to the original payload on point-read. A second pass repeats the check
 * with a very large (10,000 char) encrypted string value.
 *
 * Fix(review): the large payload was built with {@code longString += "a"} in a loop —
 * O(n^2) string concatenation — now replaced with a StringBuilder of equal result.
 */
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);

    // Second pass: a 10,000-character sensitive string must also round-trip.
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longStringBuilder = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longStringBuilder.append("a");
    }
    properties.setSensitiveString(longStringBuilder.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// With contentResponseOnWriteEnabled=false the create must still succeed (non-zero
// request charge) but the response carries no item body.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
// No payload is returned when content-response-on-write is disabled.
assertThat(itemResponse.getItem()).isNull();
}
// Upserting a fresh item must transparently encrypt it; a subsequent point-read must
// decrypt it back to a payload equal to the original (checked via validateResponse).
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
// Upsert acts as a create here since the id is newly generated.
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
// Point-read the same item; decryption happens inside readItem.
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
// Creates an encrypted item and verifies that a SQL query by its (unencrypted) id
// returns the item fully decrypted.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
// Only the first page is inspected; the created item is expected to be in it.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
/**
 * Queries on encrypted properties via {@code queryItemsOnEncryptedProperties}: the
 * parameters bound to encrypted paths (/sensitiveString, /sensitiveLong) are registered
 * on a {@code SqlQuerySpecWithEncryption} so the SDK can encrypt them before the query
 * is issued; results must come back decrypted.
 *
 * Fix(review): the original carried a duplicated {@code @Test} annotation on this
 * method; {@code @Test} is not repeatable, so the duplicate was removed.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // @nonSensitive targets a plaintext path, so it is added directly to the spec.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Encrypted-path parameters go through addEncryptionParameter instead.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// A query parameter bound to a path using randomized encryption (/sensitiveDouble, per
// the asserted error message) must be rejected: randomized ciphertexts are not
// deterministic, so equality filters on them cannot be served.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
SqlQuerySpec querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
cosmosQueryRequestOptions, EncryptionPojo.class);
try {
// blockFirst() triggers execution; the failure is expected before any result arrives.
// NOTE(review): feedResponse is intentionally unused — reaching fail() is the point.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
fail("Query on randomized parameter should fail");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
"query because of randomized encryption");
}
}
/**
 * Drains a three-document query one page at a time via continuation tokens and verifies that
 * every page honors the requested page size and no document is lost or duplicated.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Create three items and remember their ids so the query targets exactly these documents.
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());

    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            // Use the declared pageSize variable; the literal 1 previously hard-coded here
            // would silently diverge from the assertion below if pageSize were ever changed.
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Disabled: the service currently always reports policyFormatVersion 0, so the
// incompatible-version code path cannot be exercised against a live account.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
// Any CRUD call triggers a client-encryption-policy fetch, which is expected to reject
// a policy format version greater than 1 with UnsupportedOperationException.
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
/**
 * Exercises every CRUD, query, and batch path against a container whose backing database /
 * container is repeatedly deleted and recreated behind the original container proxy's back,
 * verifying that stale encryption-settings caches are refreshed transparently.
 *
 * <p>Fix in this revision: the request-charge assertion after replaceItem previously re-checked
 * {@code upsertResponse} (a copy-paste slip) instead of {@code replaceResponse}.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    String databaseId = UUID.randomUUID().toString();
    try {
        // Phase 1: fresh database + encrypted container; baseline create/query/read.
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Phase 2: drop and recreate the database — the original proxy must recover on create.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());

        // Phase 3: recreate the container with a narrower policy (only /sensitiveString);
        // the original proxy must pick up the new policy on read.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Phase 4: recreate with the full policy again — upsert through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Phase 5: recreate again — replace through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // BUGFIX: assert on the replace response (was upsertResponse, already checked above).
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Phase 6: paged query through the stale proxy over ten freshly created items.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);

        // Phase 7: queryItemsOnEncryptedProperties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong");
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }

        // Phase 8: transactional batch through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId= UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch(Exception ex) {
            // Best-effort cleanup: the database may already be gone; failures are deliberately ignored.
        }
    }
}
/**
 * Creating a client encryption key with an unknown data-encryption algorithm must be rejected
 * with an IllegalArgumentException carrying the offending algorithm name.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
    try {
        TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
        EncryptionKeyWrapMetadata keyWrapMetadata = new EncryptionKeyWrapMetadata(
            keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
        this.cosmosEncryptionAsyncDatabase
            .createClientEncryptionKey("key1", "InvalidAlgorithm", keyWrapMetadata)
            .block();
        fail("client encryption key create should fail on invalid algorithm");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
    }
}
/**
 * Runs a five-operation transactional batch (create, replace, upsert, read, delete) on a single
 * partition key through the encryption container and verifies each operation's status code and,
 * where a body is returned, its decrypted content.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String id = UUID.randomUUID().toString();
    EncryptionPojo originalItem = getItem(id);
    EncryptionPojo replacementItem = getItem(id);
    replacementItem.setSensitiveString("ReplacedSensitiveString");

    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(id));
    batch.createItemOperation(originalItem);
    batch.replaceItemOperation(id, replacementItem);
    batch.upsertItemOperation(originalItem);
    batch.readItemOperation(id);
    batch.deleteItemOperation(id);

    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();

    assertThat(response.getResults().size()).isEqualTo(5);
    int[] expectedStatusCodes = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(response.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // Delete (index 4) returns no body; validate the four body-bearing results.
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), originalItem);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacementItem);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), originalItem);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), originalItem);
}
/**
 * Same five-operation transactional batch as {@code batchExecution}, but every operation is
 * submitted through the overloads that accept {@code CosmosBatchItemRequestOptions}.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String id = UUID.randomUUID().toString();
    EncryptionPojo originalItem = getItem(id);
    EncryptionPojo replacementItem = getItem(id);
    replacementItem.setSensitiveString("ReplacedSensitiveString");

    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(id));
    CosmosBatchItemRequestOptions perItemOptions = new CosmosBatchItemRequestOptions();
    batch.createItemOperation(originalItem, perItemOptions);
    batch.replaceItemOperation(id, replacementItem, perItemOptions);
    batch.upsertItemOperation(originalItem, perItemOptions);
    batch.readItemOperation(id, perItemOptions);
    batch.deleteItemOperation(id, perItemOptions);

    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();

    assertThat(response.getResults().size()).isEqualTo(5);
    int[] expectedStatusCodes = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatusCodes.length; i++) {
        assertThat(response.getResults().get(i).getStatusCode()).isEqualTo(expectedStatusCodes[i]);
    }
    // Delete (index 4) returns no body; validate the four body-bearing results.
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), originalItem);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacementItem);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), originalItem);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), originalItem);
}
/**
 * Asserts the round-trip of an item written under a policy that encrypts only
 * {@code /sensitiveString}: every other field must equal the original, while the sensitive
 * string must DIFFER (it is still ciphertext because the reading container did not decrypt it).
 */
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The one encrypted path: stored value must not match the plaintext original.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
/**
 * Builds a client-encryption path list covering only {@code /sensitiveString}, deterministically
 * encrypted with AEAD_AES_256_CBC_HMAC_SHA256 under key "key1".
 *
 * @return a mutable list containing the single included path.
 */
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);

    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
/**
 * Creates a container partitioned on {@code /mypk} with the supplied client encryption policy
 * applied, blocking until the service call completes.
 */
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
}
/**
 * Creates a fresh database and provisions two client encryption keys ("key1" and "key2"), both
 * using AEAD_AES_256_CBC_HMAC_SHA256 and wrapped through the test key-store provider.
 */
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    EncryptionKeyWrapMetadata firstKeyMetadata =
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
    EncryptionKeyWrapMetadata secondKeyMetadata =
        new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key2", "tempmetadata2");

    cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
    CosmosEncryptionAsyncDatabase encryptionDatabase =
        cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
    encryptionDatabase.createClientEncryptionKey("key1",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, firstKeyMetadata).block();
    encryptionDatabase.createClientEncryptionKey("key2",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, secondKeyMetadata).block();
}
/**
 * Builds a brand-new CosmosAsyncClient / encryption-client pair and returns a container proxy
 * backed by it, so the returned container starts with a cold encryption-settings cache
 * (independent of any cache held by previously created proxies).
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
}
README.md should also be updated to use the new tags instead of line numbers.
public void createClient() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildClient();
}
public void createClient() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildClient();
}
class ReadmeSamples {
// Registry endpoint used by the README samples below.
private String endpoint = "endpoint";
// Sample: build an async registry client authenticated with DefaultAzureCredential.
public void createAsyncClient() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryAsyncClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildAsyncClient();
}
// Sample: build a sync client and print every repository name in the registry.
public void listRepositoryNamesSample() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildClient();
client.listRepositoryNames().forEach(repository -> System.out.println(repository));
}
private final String repositoryName = "repository";
// Sample: service failures surface as HttpResponseException; caught here only to show where.
public void getPropertiesThrows() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRepository containerRepository = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildClient()
.getRepository(repositoryName);
try {
containerRepository.getProperties();
} catch (HttpResponseException exception) {
// Intentionally empty: the README snippet only demonstrates the exception type.
}
}
// Sample: anonymous (credential-free) client — read-only operations only.
public void createAnonymousAccessClient() {
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.buildClient();
}
// Sample: anonymous async client.
public void createAnonymousAccessAsyncClient() {
ContainerRegistryAsyncClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.buildAsyncClient();
}
/**
 * Sample: for every repository, keep the three most recently updated images and delete the rest.
 */
public void deleteImages() {
    TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build();
    ContainerRegistryClient client = new ContainerRegistryClientBuilder()
        .endpoint(endpoint)
        .audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
        .credential(defaultCredential)
        .buildClient();
    final int imagesCountToKeep = 3;
    for (String repositoryName : client.listRepositoryNames()) {
        final ContainerRepository repository = client.getRepository(repositoryName);
        // Newest first, so everything after the first imagesCountToKeep entries is stale.
        PagedIterable<ArtifactManifestProperties> imageManifests =
            repository.listManifestProperties(
                ArtifactManifestOrderBy.LAST_UPDATED_ON_DESCENDING,
                Context.NONE);
        imageManifests.stream().skip(imagesCountToKeep)
            .forEach(imageManifest -> {
                // BUGFIX: pass the arguments to printf directly instead of pre-formatting with
                // String.format — the old double formatting pass would re-interpret any '%'
                // in the digest and could throw or corrupt the output.
                System.out.printf("Deleting image with digest %s.%n", imageManifest.getDigest());
                System.out.printf("    This image has the following tags: ");
                for (String tagName : imageManifest.getTags()) {
                    System.out.printf("    %s:%s", imageManifest.getRepositoryName(), tagName);
                }
                repository.getArtifact(imageManifest.getDigest()).delete();
            });
    }
}
private String tag = "tag";
// Sample: lock a tag by disabling both writes and deletes on it.
public void setArtifactProperties() {
TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(defaultCredential)
.buildClient();
RegistryArtifact image = client.getArtifact(repositoryName, digest);
image.updateTagProperties(
tag,
new ArtifactTagProperties()
.setWriteEnabled(false)
.setDeleteEnabled(false));
}
// Placeholder values used by the README snippets below.
private final String architecture = "architecture";
private final String os = "os";
private final String digest = "digest";
/**
 * Sample: list all tags that alias a given artifact, using anonymous access.
 */
public void listTagProperties() {
    ContainerRegistryClient anonymousClient = new ContainerRegistryClientBuilder()
        .endpoint(endpoint)
        .audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
        .buildClient();
    RegistryArtifact image = anonymousClient.getArtifact(repositoryName, digest);
    PagedIterable<ArtifactTagProperties> tags = image.listTagProperties();
    // BUGFIX: printf formats its arguments itself; the previous String.format-inside-printf
    // formatted twice and would misbehave if any argument contained a '%'.
    System.out.printf("%s has the following aliases:", image.getFullyQualifiedReference());
    for (ArtifactTagProperties tag : tags) {
        System.out.printf("%s/%s:%s", anonymousClient.getEndpoint(), repositoryName, tag.getName());
    }
}
// Sample: write operations (delete) are rejected for anonymous clients with
// ClientAuthenticationException.
public void anonymousClientThrows() {
final String endpoint = getEndpoint();
final String repositoryName = getRepositoryName();
ContainerRegistryClient anonymousClient = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.buildClient();
try {
anonymousClient.deleteRepository(repositoryName);
System.out.println("Unexpected Success: Delete is not allowed on anonymous access");
} catch (ClientAuthenticationException ex) {
System.out.println("Expected exception: Delete is not allowed on anonymous access");
}
}
// Placeholder accessors for the README snippets; real values come from the user's environment.
private static String getEndpoint() {
return null;
}
private static String getRepositoryName() {
return null;
}
private static String getTagName() {
return null;
}
// Credential placeholder for the national-cloud sample below.
private final TokenCredential credentials = null;
// Sample: target a national cloud (Azure China) by supplying the matching audience.
public void nationalCloudSample() {
ContainerRegistryClient containerRegistryClient = new ContainerRegistryClientBuilder()
.endpoint(getEndpoint())
.credential(credentials)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_CHINA)
.buildClient();
containerRegistryClient
.listRepositoryNames()
.forEach(name -> System.out.println(name));
}
}
class ReadmeSamples {
// Registry endpoint shared by the sample methods below.
private String endpoint = "endpoint";
// Sample: asynchronous registry client authenticated via DefaultAzureCredential.
public void createAsyncClient() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryAsyncClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildAsyncClient();
}
// Sample: synchronous client; prints all repository names.
public void listRepositoryNamesSample() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildClient();
client.listRepositoryNames().forEach(repository -> System.out.println(repository));
}
private final String repositoryName = "repository";
// Sample: demonstrates that service errors are thrown as HttpResponseException.
public void getPropertiesThrows() {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
ContainerRepository containerRepository = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(credential)
.buildClient()
.getRepository(repositoryName);
try {
containerRepository.getProperties();
} catch (HttpResponseException exception) {
// Intentionally empty for snippet brevity.
}
}
// Sample: anonymous client — suitable for read-only operations only.
public void createAnonymousAccessClient() {
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.buildClient();
}
// Sample: anonymous async client.
public void createAnonymousAccessAsyncClient() {
ContainerRegistryAsyncClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.buildAsyncClient();
}
/**
 * Sample: for every repository, keep the three most recently updated images and delete the rest.
 */
public void deleteImages() {
    TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build();
    ContainerRegistryClient client = new ContainerRegistryClientBuilder()
        .endpoint(endpoint)
        .audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
        .credential(defaultCredential)
        .buildClient();
    final int imagesCountToKeep = 3;
    for (String repositoryName : client.listRepositoryNames()) {
        final ContainerRepository repository = client.getRepository(repositoryName);
        // Descending by last-update, so entries after the first imagesCountToKeep are stale.
        PagedIterable<ArtifactManifestProperties> imageManifests =
            repository.listManifestProperties(
                ArtifactManifestOrderBy.LAST_UPDATED_ON_DESCENDING,
                Context.NONE);
        imageManifests.stream().skip(imagesCountToKeep)
            .forEach(imageManifest -> {
                // BUGFIX: do not wrap the arguments in String.format before handing them to
                // printf — the second formatting pass re-interprets any '%' in the digest.
                System.out.printf("Deleting image with digest %s.%n", imageManifest.getDigest());
                System.out.printf("    This image has the following tags: ");
                for (String tagName : imageManifest.getTags()) {
                    System.out.printf("    %s:%s", imageManifest.getRepositoryName(), tagName);
                }
                repository.getArtifact(imageManifest.getDigest()).delete();
            });
    }
}
private String tag = "tag";
// Sample: make a tag read-only by disabling writes and deletes.
public void setArtifactProperties() {
TokenCredential defaultCredential = new DefaultAzureCredentialBuilder().build();
ContainerRegistryClient client = new ContainerRegistryClientBuilder()
.endpoint(endpoint)
.audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
.credential(defaultCredential)
.buildClient();
RegistryArtifact image = client.getArtifact(repositoryName, digest);
image.updateTagProperties(
tag,
new ArtifactTagProperties()
.setWriteEnabled(false)
.setDeleteEnabled(false));
}
// Placeholder values used by the README snippets below.
private final String architecture = "architecture";
private final String os = "os";
private final String digest = "digest";
/**
 * Sample: list every tag aliasing a given artifact through an anonymous client.
 */
public void listTagProperties() {
    ContainerRegistryClient anonymousClient = new ContainerRegistryClientBuilder()
        .endpoint(endpoint)
        .audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
        .buildClient();
    RegistryArtifact image = anonymousClient.getArtifact(repositoryName, digest);
    PagedIterable<ArtifactTagProperties> tags = image.listTagProperties();
    // BUGFIX: let printf do the single formatting pass; String.format-inside-printf formatted
    // twice and would break on any '%' inside the formatted arguments.
    System.out.printf("%s has the following aliases:", image.getFullyQualifiedReference());
    for (ArtifactTagProperties tag : tags) {
        System.out.printf("%s/%s:%s", anonymousClient.getEndpoint(), repositoryName, tag.getName());
    }
}
/**
 * Sample: shows that a delete attempted through an anonymous (credential-less)
 * client is rejected with an authentication exception.
 */
public void anonymousClientThrows() {
    final String registryEndpoint = getEndpoint();
    final String targetRepository = getRepositoryName();
    ContainerRegistryClient anonymousClient = new ContainerRegistryClientBuilder()
        .endpoint(registryEndpoint)
        .audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD)
        .buildClient();
    try {
        anonymousClient.deleteRepository(targetRepository);
        System.out.println("Unexpected Success: Delete is not allowed on anonymous access");
    } catch (ClientAuthenticationException ex) {
        System.out.println("Expected exception: Delete is not allowed on anonymous access");
    }
}
// Placeholder for the samples; a real application would return the registry endpoint URL.
private static String getEndpoint() {
    return null;
}
// Placeholder for the samples; a real application would return the target repository name.
private static String getRepositoryName() {
    return null;
}
// Placeholder for the samples; not referenced by the snippets visible here.
private static String getTagName() {
    return null;
}
private final TokenCredential credentials = null;
/**
 * Sample: targets a national (sovereign) cloud by pairing the cloud-specific
 * endpoint with the matching {@code ContainerRegistryAudience}.
 */
public void nationalCloudSample() {
    ContainerRegistryClientBuilder builder = new ContainerRegistryClientBuilder()
        .endpoint(getEndpoint())
        .credential(credentials)
        .audience(ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_CHINA);
    ContainerRegistryClient registryClient = builder.buildClient();
    // Print every repository name in the registry.
    registryClient.listRepositoryNames().forEach(System.out::println);
}
} | |
Removed, and added the correct assertion usage to be consistent with the rest of the test cases. | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, EncryptionPojo.class);
List<EncryptionPojo> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
long endTime = Instant.now().getEpochSecond();
int timeStamp = 0;
for (Object pojo: feedResponse1) {
timeStamp = Integer.parseInt(pojo.toString());
}
Assert.assertTrue(timeStamp > startTime);
Assert.assertTrue(timeStamp <= endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
} | Assert.assertTrue(timeStamp > startTime); | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: the "clientBuilders" data provider instantiates this
// suite once per configured CosmosClientBuilder variant.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
    super(clientBuilder);
}
// One-time suite setup: builds the async client, wraps it in an encryption client
// backed by the test key-store provider, resolves the shared encryption
// database/container, and provisions an extra container whose encryption policy is
// forced to format version 2 for the incompatibility test below.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
    // Guard: the factory must not run setup twice on the same instance.
    assertThat(this.client).isNull();
    this.client = getClientBuilder().buildAsyncClient();
    TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
    cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
        encryptionKeyStoreProvider);
    cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
    cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
    // Policy format version 2 is injected via reflection — presumably because the
    // public API does not expose it; used to simulate an incompatible container.
    ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
    ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
    String containerId = UUID.randomUUID().toString();
    CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
    properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
    encryptionContainerWithIncompatiblePolicyVersion =
        cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Suite teardown: the shared client must have been created, and is closed exactly once.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    CosmosAsyncClient suiteClient = this.client;
    assertThat(suiteClient).isNotNull();
    suiteClient.close();
}
/**
 * Creates an encrypted item and reads it back, validating the decrypted
 * round trip; then repeats with a 10,000-character sensitive value to
 * exercise encryption of large payloads.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    properties = getItem(UUID.randomUUID().toString());
    // Fix: the original built the string with '+=' in a loop, copying the whole
    // string on every iteration (O(n^2)); StringBuilder appends in O(n).
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
/**
 * When content-response-on-write is disabled, the create must still succeed
 * (non-zero request charge) but no item body is returned.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemRequestOptions writeOptions = new CosmosItemRequestOptions();
    writeOptions.setContentResponseOnWriteEnabled(false);
    CosmosItemResponse<EncryptionPojo> response = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), writeOptions).block();
    assertThat(response.getRequestCharge()).isGreaterThan(0);
    assertThat(response.getItem()).isNull();
}
/**
 * Upserts an encrypted item, then reads it back and verifies the decrypted
 * round trip matches the original.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    PartitionKey partitionKey = new PartitionKey(pojo.getMypk());
    CosmosItemResponse<EncryptionPojo> upsertResponse = cosmosEncryptionAsyncContainer.upsertItem(pojo,
        partitionKey, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateResponse(pojo, upsertResponse.getItem());
    EncryptionPojo roundTripped = cosmosEncryptionAsyncContainer.readItem(pojo.getId(), partitionKey,
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(pojo, roundTripped);
}
/**
 * Creates an encrypted item and verifies a SQL query can locate it; query
 * results are transparently decrypted before validation.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo createdItem = createResponse.getItem();
    validateResponse(pojo, createdItem);
    SqlQuerySpec spec = new SqlQuerySpec(String.format("SELECT * from c where c.id = '%s'", pojo.getId()));
    CosmosPagedFlux<EncryptionPojo> pagedFlux =
        cosmosEncryptionAsyncContainer.queryItems(spec, new CosmosQueryRequestOptions(), EncryptionPojo.class);
    List<EncryptionPojo> results = pagedFlux.byPage().blockFirst().getResults();
    assertThat(results.size()).isGreaterThanOrEqualTo(1);
    // Validate only the document this test created; others may exist in the shared container.
    for (EncryptionPojo result : results) {
        if (result.getId().equals(pojo.getId())) {
            validateResponse(result, createdItem);
        }
    }
}
/**
 * Queries on encrypted properties: parameters bound to encrypted paths are
 * registered through {@code SqlQuerySpecWithEncryption} so their values are
 * encrypted client-side before the query is sent.
 *
 * <p>Fix: the method carried a duplicated {@code @Test} annotation; TestNG's
 * {@code @Test} is not a repeatable annotation, so the duplicate does not
 * compile and has been removed.</p>
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // Plaintext parameter goes straight onto the query spec.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Parameters for encrypted paths must be added via addEncryptionParameter.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * A parameter bound to a RANDOMIZED-encrypted path (/sensitiveDouble) cannot be
 * used for equality filtering; executing such a query must fail client-side.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo createdItem = createResponse.getItem();
    validateResponse(pojo, createdItem);
    SqlQuerySpec querySpec = new SqlQuerySpec(
        "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = @nonSensitive and c.sensitiveDouble = @sensitiveDouble");
    querySpec.getParameters().add(new SqlParameter("@nonSensitive", pojo.getNonSensitive()));
    SqlQuerySpecWithEncryption specWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    specWithEncryption.addEncryptionParameter("/sensitiveString",
        new SqlParameter("@sensitiveString", pojo.getSensitiveString()));
    specWithEncryption.addEncryptionParameter("/sensitiveDouble",
        new SqlParameter("@sensitiveDouble", pojo.getSensitiveDouble()));
    CosmosPagedFlux<EncryptionPojo> pagedFlux =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(specWithEncryption,
            new CosmosQueryRequestOptions(), EncryptionPojo.class);
    try {
        // Execution (not construction) is where the randomized path is rejected.
        pagedFlux.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
/**
 * Pages through a query using continuation tokens with a fixed page size and
 * verifies every created document is returned exactly once.
 *
 * <p>Fix: {@code byPage} was called with the literal {@code 1} while the
 * per-page assertion compared against the {@code pageSize} variable; the two
 * would silently diverge if the page size were ever changed. The variable is
 * now used in both places (behavior today is identical since pageSize == 1).</p>
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
/**
 * CRUD against a container whose encryption policy reports format version 2
 * must be rejected by this library version.
 */
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
    try {
        EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
        encryptionContainerWithIncompatiblePolicyVersion.createItem(pojo,
            new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
            "policy " +
            "fetch because of policy format version greater than 1");
    } catch (UnsupportedOperationException ex) {
        assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
            "container. Please upgrade to the latest version of the same.");
    }
}
// End-to-end check that the encryption client copes with STALE cached metadata:
// the database/container behind an existing proxy object is repeatedly dropped and
// recreated, and every operation issued through the old ("stale") proxy must still
// succeed by refreshing its cached container/encryption-policy information.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    String databaseId = UUID.randomUUID().toString();
    try {
        createNewDatabaseWithClientEncryptionKey(databaseId);
        // Dedicated client/proxy hierarchy for this test's throwaway database.
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
        // Baseline: create, query and read through the original proxy while its cache is fresh.
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);
        // Scenario 1: drop and recreate the whole database; a create through the
        // now-stale original proxy must still succeed.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        // Scenario 2: recreate the container with a DIFFERENT policy (only
        // /sensitiveString encrypted); write through a fresh proxy, then verify a
        // raw (non-decrypting) read shows exactly that one field scrambled, and a
        // read through the stale proxy still decrypts correctly.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);
        // Scenario 3: recreate with the original policy; upsert through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);
        // Scenario 4: recreate again; create through a fresh proxy, replace through the stale one.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // NOTE(review): this re-asserts upsertResponse from Scenario 3 — likely a
        // copy-paste slip; replaceResponse.getRequestCharge() was probably intended.
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);
        // Scenario 5: recreate; bulk-load 10 items through a fresh proxy, then page
        // through a query on the stale proxy using continuation tokens.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);
        // Scenario 6: recreate; query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong");
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }
        // Scenario 7: recreate; transactional batch (create + read) through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId= UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        // Best-effort cleanup: the database may already be gone, so failures are
        // deliberately swallowed.
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch(Exception ex) {
        }
    }
}
/**
 * Creating a client encryption key with an unknown algorithm name must be
 * rejected with an IllegalArgumentException.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
    try {
        TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
        EncryptionKeyWrapMetadata wrapMetadata =
            new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1",
                "tempmetadata1");
        this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
            "InvalidAlgorithm", wrapMetadata).block();
        fail("client encryption key create should fail on invalid algorithm");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
    }
}
/**
 * Executes a transactional batch (create, replace, upsert, read, delete) against
 * the encrypted container and validates per-operation status codes and payloads.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createdItem = getItem(itemId);
    EncryptionPojo replacedItem = getItem(itemId);
    replacedItem.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createdItem);
    batch.replaceItemOperation(itemId, replacedItem);
    batch.upsertItemOperation(createdItem);
    batch.readItemOperation(itemId);
    batch.deleteItemOperation(itemId);
    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(response.getResults().size()).isEqualTo(5);
    assertThat(response.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(response.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(response.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(response.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(response.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    // Create returns createdItem, replace returns replacedItem, the upsert restores
    // createdItem, and the subsequent read observes that upserted state.
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), createdItem);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacedItem);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), createdItem);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), createdItem);
}
/**
 * Same transactional batch flow as batchExecution, but every operation goes
 * through the overloads accepting CosmosBatchItemRequestOptions.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createdItem = getItem(itemId);
    EncryptionPojo replacedItem = getItem(itemId);
    replacedItem.setSensitiveString("ReplacedSensitiveString");
    CosmosBatchItemRequestOptions perOperationOptions = new CosmosBatchItemRequestOptions();
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createdItem, perOperationOptions);
    batch.replaceItemOperation(itemId, replacedItem, perOperationOptions);
    batch.upsertItemOperation(createdItem, perOperationOptions);
    batch.readItemOperation(itemId, perOperationOptions);
    batch.deleteItemOperation(itemId, perOperationOptions);
    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(response.getResults().size()).isEqualTo(5);
    // Expected per-operation status codes, in submission order.
    int[] expectedStatuses = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatuses.length; i++) {
        assertThat(response.getResults().get(i).getStatusCode()).isEqualTo(expectedStatuses[i]);
    }
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), createdItem);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacedItem);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), createdItem);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), createdItem);
}
// Asserts that an item written under the one-field encryption policy (only
// /sensitiveString encrypted) matches the original: every property round-trips
// unchanged EXCEPT sensitiveString, which is expected to differ — presumably because
// it is still ciphertext when read without decryption (TODO confirm with callers).
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The single encrypted field: must NOT equal the plaintext original.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
/**
 * Builds an encryption-policy path list containing a single included path:
 * {@code /sensitiveString}, deterministic AEAD_AES_256_CBC_HMAC_SHA256 under "key1".
 * Returned as a mutable list so callers may extend it.
 */
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);

    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
// Creates a container partitioned on /mypk with the given client-side encryption
// policy in the given database; blocks until the service call completes.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
// Creates a fresh database and provisions the two client encryption keys ("key1" and
// "key2", both AEAD_AES_256_CBC_HMAC_SHA256) referenced by the encryption policies
// used in these tests. Each service call is blocked on sequentially.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
/**
 * Builds a brand-new encryption container proxy over the given database/container ids.
 * A fresh CosmosAsyncClient is built deliberately (instead of reusing this.client) so
 * the returned proxy starts with cold caches — the stale-cache tests depend on that.
 *
 * Fixes: the previous locals shadowed the class fields {@code client} and
 * {@code cosmosEncryptionAsyncClient}, and the container was stored in a redundant
 * temporary before being returned.
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient newClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient newEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(newClient, keyStoreProvider);
    // Return the container proxy directly; no intermediate local needed.
    return newEncryptionClient
        .getCosmosEncryptionAsyncDatabase(newClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client wrapped by the encryption client; built in before_CosmosItemTest
// and closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around the raw client.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container whose policy format version is forced to 2 via reflection, used to
// exercise the incompatible-version error path.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests (package-private on purpose).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one instance of this test class is created per client
// builder supplied by the "clientBuilders" data provider.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the shared encryption client/database/container, and creates
// an extra container whose encryption policy format version is forced to 2 via
// reflection so incompatiblePolicyFormatVersion can exercise the rejection path.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// policyFormatVersion=2 is not settable through the public API, hence reflection.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Closes the shared client built in before_CosmosItemTest; alwaysRun so cleanup
// happens even if a test in the class failed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    // Round-trip: create an item through the encryption container, read it back, and
    // verify every sensitive field decrypts to its original value.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);

    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);

    // Second pass with a large (10,000-char) sensitive value. Built with StringBuilder:
    // the previous String += in a loop was O(n^2).
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// With contentResponseOnWriteEnabled=false the write response carries no resource
// body, so getItem() must be null even though the create itself succeeded (the
// request charge is still reported).
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
// Upsert (insert path, item does not exist yet) followed by a point read; both the
// upsert response and the read must decrypt back to the original values.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
// Creates an item, then queries it back by id (a non-encrypted property) and checks
// that query results are transparently decrypted.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
// The container is shared across tests, so only the item created above is validated.
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
// FIX: the @Test annotation was duplicated on consecutive lines; TestNG's @Test is not
// @Repeatable, so the duplicate does not compile. Exactly one annotation is kept.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // Queries filtering on encrypted properties must go through SqlQuerySpecWithEncryption
    // so the parameter values are encrypted client-side before the query is issued.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);

    // Plain concatenation; the previous String.format call had no format specifiers.
    // Resulting query text is unchanged.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Encrypted parameters are NOT added to the spec directly; they are registered on
    // the SqlQuerySpecWithEncryption against their encrypted paths.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    // /sensitiveDouble uses randomized encryption, which cannot support equality
    // filters; the query pre-processing must reject it with IllegalArgumentException.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);

    // Plain concatenation; the previous String.format call had no format specifiers.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // The terminal operator triggers query pre-processing; the result itself is
        // never used, so the previous unused local was dropped.
        feedResponseIterator.byPage().blockFirst();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Creates three items, then drains the query one page at a time via continuation
    // tokens, asserting each page holds exactly pageSize results and that all three
    // documents are eventually returned.
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());

    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // FIX: use the declared pageSize instead of a hard-coded literal 1, so the
        // byPage argument and the per-page assertion below stay in sync.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Writing through a container whose policy format version is 2 must be rejected,
// since the library only handles version 1 (see before_CosmosItemTest, which forces
// the version via reflection). Ignored while the service reports version 0.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    // Exercises every CRUD/query/batch path against a container that is repeatedly
    // deleted and recreated behind the original proxy's back, proving the client-side
    // encryption caches recover from stale entries.
    // FIX: the request-charge assertion after replaceItem checked upsertResponse
    // (copy-paste); it now checks replaceResponse.
    String databaseId = UUID.randomUUID().toString();
    try {
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);

        // Baseline create/query/read against the freshly created container.
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Recreate database + container, then create through the now-stale proxy.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());

        // Recreate the container with a one-field policy; a fresh proxy writes under
        // the new policy and the stale proxy must still read/decrypt correctly.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Upsert through the stale proxy after another container recreation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Replace through the stale proxy after another container recreation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Paged query through the stale proxy over documents written by a fresh proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);

        // Query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong";
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }

        // Batch execution through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId= UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        // Best-effort cleanup: the database may already be gone if the test failed
        // early, so the delete failure is deliberately swallowed.
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch(Exception ignored) {
        }
    }
}
// Creating a client encryption key with an unrecognized algorithm name must be
// rejected client-side with IllegalArgumentException before any service call.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
// Runs create/replace/upsert/read/delete in one transactional batch (no per-item
// options) and verifies each result's status code and decrypted payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// The delete result (index 4) has no body, so only the first four are validated.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Same flow as batchExecution, but every operation is added via the overloads that
// accept CosmosBatchItemRequestOptions.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// The delete result (index 4) has no body, so only the first four are validated.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts that an item written under the one-field encryption policy (only
// /sensitiveString encrypted) matches the original: every property round-trips
// unchanged EXCEPT sensitiveString, which is expected to differ — presumably because
// it is still ciphertext when read without decryption (TODO confirm with callers).
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The single encrypted field: must NOT equal the plaintext original.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
// Builds an encryption-policy path list with a single included path: /sensitiveString,
// deterministic AEAD_AES_256_CBC_HMAC_SHA256 under client encryption key "key1".
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath();
includedPath.setClientEncryptionKeyId("key1");
includedPath.setPath("/sensitiveString");
includedPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
paths.add(includedPath);
return paths;
}
// Creates a container partitioned on /mypk with the given client-side encryption
// policy in the given database; blocks until the service call completes.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
// Creates a fresh database and provisions the two client encryption keys ("key1" and
// "key2", both AEAD_AES_256_CBC_HMAC_SHA256) referenced by the encryption policies
// used in these tests. Each service call is blocked on sequentially.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
/**
 * Builds a completely fresh client / encryption-client / database / container
 * proxy chain so the returned container carries no cached encryption metadata
 * (used to simulate a second, independent client instance).
 *
 * @param databaseId  id of the target database
 * @param containerId id of the target container
 * @return a new, uncached container proxy
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    // Fresh local names avoid shadowing the class-level client fields.
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    CosmosEncryptionAsyncDatabase freshEncryptionDatabase =
        freshEncryptionClient.getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId));
    return freshEncryptionDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
} |
/**
 * Verifies VALUE aggregate queries (max over _ts, top-1 count) against the
 * encryption container. Aggregates return bare scalars, so pages are
 * deserialized with the scalar type instead of EncryptionPojo, and max(_ts)
 * must fall inside the wall-clock window [startTime, endTime] of the test.
 */
public void queryItemsAggregate() {
    long startTime = Instant.now().getEpochSecond();
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    // VALUE max(c._ts) yields a bare integer: deserialize as Integer rather than
    // EncryptionPojo and Integer.parseInt(pojo.toString()), which was fragile.
    String query1 = "Select value max(c._ts) from c";
    CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
    CosmosPagedFlux<Integer> feedResponseIterator1 =
        cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
    List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
    int timeStamp = feedResponse1.get(0);
    long endTime = Instant.now().getEpochSecond();
    // >= rather than >: an item can be created within the same epoch second as startTime.
    assertThat(timeStamp).isGreaterThanOrEqualTo((int) startTime);
    assertThat(timeStamp).isLessThanOrEqualTo((int) endTime);
    assertThat(feedResponse1.size()).isEqualTo(1);
    // TOP 1 VALUE count(c) is likewise a single scalar.
    String query2 = "Select top 1 value count(c) from c order by c._ts";
    CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
    SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
    CosmosPagedFlux<Integer> feedResponseIterator2 =
        cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
    List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
    assertThat(feedResponse2.size()).isEqualTo(1);
} | Assert.assertTrue(timeStamp <= endTime); | public void queryItemsAggregate() {
// Seed three documents so the aggregate queries below have data to work on.
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// VALUE max(c._ts) returns a bare integer, hence pages are deserialized as Integer.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
// max(_ts) must lie within the test's wall-clock window; >= allows same-second creation.
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
// TOP 1 VALUE count(c): also a single scalar result.
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
// Aggregate over an encrypted path (/sensitiveString); the scalar here is a String.
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
/**
 * TestNG factory constructor: one test instance per configured client builder.
 *
 * @param clientBuilder builder variant supplied by the "clientBuilders" data provider
 */
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
/**
 * One-time setup: builds the async client + encryption client, resolves the
 * shared encryption database/container, and creates an extra container whose
 * policy format version is forced to 2 (via reflection) for the
 * incompatible-version test.
 */
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guards against double initialization across factory instances.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Force an (unsupported) policy format version 2 to exercise the rejection path.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
/**
 * One-time teardown: closes the async client created in before_CosmosItemTest.
 */
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
/**
 * Round-trips an item through the encryption container: create (encrypt on
 * write), read back (decrypt on read), then repeats with a 10k-character
 * sensitive string to exercise large encrypted payloads.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    // Second pass with a large sensitive value; StringBuilder avoids the O(n^2)
    // cost of repeated String concatenation in a loop.
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longStringBuilder = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longStringBuilder.append('a');
    }
    properties.setSensitiveString(longStringBuilder.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
/**
 * With contentResponseOnWriteEnabled=false the service omits the resource body
 * from the write response, so getItem() must be null while the request still
 * incurs a charge.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
/**
 * Upserts a new item (insert path of upsert) and verifies both the write
 * response and a subsequent decrypted read match the original payload.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
/**
 * Creates an item and queries it back by id (non-encrypted property),
 * verifying the query results are transparently decrypted.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
// The shared container may hold items from other tests; only validate ours.
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
/**
 * Queries on encrypted properties via SqlQuerySpecWithEncryption: parameters
 * bound to encrypted paths (/sensitiveString, /sensitiveLong) are encrypted
 * client-side before the query is sent, so equality filters still match.
 */
// Note: the duplicated @Test annotation was removed — TestNG's @Test is not a
// repeatable annotation, so declaring it twice does not compile.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Parameters on encrypted paths are registered separately so they can be
    // encrypted before query execution.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * Binding a query parameter to a RANDOMIZED-encrypted path (/sensitiveDouble)
 * must be rejected: randomized ciphertexts are not comparable, so equality
 * filters cannot work. Expects an IllegalArgumentException at execution time.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Draining the first page forces execution; the result itself is irrelevant
        // (the previously unused local assignment was removed).
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
/**
 * Pages through a three-item query one page at a time using continuation
 * tokens, asserting each page honors the requested page size and the total
 * equals the number of inserted documents.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Use the pageSize variable (was a hard-coded 1, defeating its purpose).
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
/**
 * A container whose encryption policy format version is greater than 1 must be
 * rejected by this library version with an UnsupportedOperationException.
 * Currently ignored because the service always reports policyFormatVersion 0.
 */
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
/**
 * Exercises every CRUD/query/batch path through a container proxy whose cached
 * encryption metadata has gone stale: the backing database/container is
 * repeatedly deleted and recreated (sometimes with a different policy) behind
 * the original proxy, which must transparently refresh its caches.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    String databaseId = UUID.randomUUID().toString();
    try {
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        // Warm the proxy's caches with a query and a read.
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);
        // Recreate database + container behind the stale proxy; create must still work.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        // Recreate the container with a one-field policy; read through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);
        // Upsert through the stale proxy after container recreation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);
        // Replace through the stale proxy after container recreation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // Fixed: assert on replaceResponse (the original re-checked upsertResponse here).
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);
        // Paged query through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);
        // Query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong");
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }
        // Batch execution through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        try {
            // Best-effort cleanup: the database may already have been deleted.
            this.client.getDatabase(databaseId).delete().block();
        } catch (Exception ex) {
        }
    }
}
/**
 * Creating a client encryption key with an unknown algorithm name must fail
 * fast with an IllegalArgumentException naming the bad algorithm.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
/**
 * Executes a transactional batch (create, replace, upsert, read, delete) of a
 * single logical item and verifies each operation's status code and decrypted
 * payload. validateResponse calls now use the (expected, actual) argument
 * order used throughout the rest of this file; the equality assertions are
 * symmetric so behavior is unchanged.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    cosmosEncryptionBatch.createItemOperation(createPojo);
    cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
    cosmosEncryptionBatch.upsertItemOperation(createPojo);
    cosmosEncryptionBatch.readItemOperation(itemId);
    cosmosEncryptionBatch.deleteItemOperation(itemId);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
    validateResponse(replacePojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    validateResponse(createPojo, batchResponse.getResults().get(2).getItem(EncryptionPojo.class));
    validateResponse(createPojo, batchResponse.getResults().get(3).getItem(EncryptionPojo.class));
}
/**
 * Same scenario as batchExecution, but passing explicit
 * CosmosBatchItemRequestOptions to every operation. validateResponse calls use
 * the file-wide (expected, actual) argument order; assertions are symmetric so
 * behavior is unchanged.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");
    CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
    cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
    cosmosBatch.replaceItemOperation(itemId, replacePojo, cosmosBatchItemRequestOptions);
    cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
    cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
    cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
    CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
    assertThat(batchResponse.getResults().size()).isEqualTo(5);
    assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
    assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
    assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
    validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
    validateResponse(replacePojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    validateResponse(createPojo, batchResponse.getResults().get(2).getItem(EncryptionPojo.class));
    validateResponse(createPojo, batchResponse.getResults().get(3).getItem(EncryptionPojo.class));
}
/**
 * Asserts that every field round-tripped unchanged EXCEPT sensitiveString:
 * under the one-field policy it is stored encrypted, so the raw read must NOT
 * equal the plaintext original.
 *
 * @param originalItem plaintext item that was written
 * @param result       item read back without decryption of sensitiveString
 */
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The only encrypted field: ciphertext must differ from the plaintext.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
/**
 * Builds an encryption-path list containing a single deterministic AEAD path
 * for {@code /sensitiveString}, keyed by "key1". Returns a mutable list.
 */
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
// Creates a container (partition key /mypk) with the given client-side
// encryption policy on the supplied database; blocks until provisioned.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
// Creates a fresh database and provisions two client encryption keys ("key1",
// "key2"), both wrapped through the test key-store provider; blocks on each step.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
/**
 * Builds a brand-new async client and encryption client, then returns a fresh
 * container proxy for {@code containerId} so no cached state is shared with
 * the test's long-lived proxy objects.
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client; created in before_CosmosItemTest, closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around 'client'.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container whose policy format version is forced to 2 (unsupported) via reflection.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests.
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one test instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the async client, wraps it in an encryption client,
// binds the shared encryption database/container, and provisions an extra
// container whose policy format version is forced to 2 so the
// incompatible-version path can be exercised.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Format version 2 is not settable through the public API, so reflection is
// used to make the client see this container's policy as incompatible.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Tear-down: the client must have been created by setup; close it to release
// its connections. alwaysRun ensures cleanup even if setup partially failed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
// Verifies the create -> encrypt -> read -> decrypt round trip, including a
// large (10,000 char) sensitive value.
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    // Exercise a large sensitive value. Build it with StringBuilder rather than
    // repeated String concatenation (the old loop was O(n^2) and allocated
    // ~10,000 intermediate strings).
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
// With setContentResponseOnWriteEnabled(false) the write still succeeds (a
// non-zero request charge is reported) but the service omits the resource
// payload, so getItem() must be null.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
// Upsert (insert path, fresh id) followed by a point read; both the write echo
// and the decrypted read must match the original plaintext payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
// Creates an item, then queries it back by id; results from the first page
// must decrypt to the created payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
// NOTE(review): blockFirst() is nullable if no page is emitted; the test
// relies on the query always producing at least one page.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
// Queries on deterministically encrypted paths via SqlQuerySpecWithEncryption:
// parameters bound to encrypted paths are encrypted client-side before the
// query is issued, so server-side equality matches the stored ciphertext.
// Fixed: the @Test annotation was duplicated, which does not compile (TestNG's
// @Test is not @Repeatable); the no-argument String.format wrapper is dropped.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// Binding a query parameter to a RANDOMIZED-encrypted path must be rejected:
// randomized ciphertexts are non-deterministic, so server-side equality can
// never match. /sensitiveDouble is the randomized path here.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal; the original wrapped a no-argument String.format around it.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Executing the pipeline must fail while handling the randomized path;
        // the result was never used, so no local binding is needed.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
// Writes three items, then drains the query one page at a time using
// continuation tokens; each page must hold exactly pageSize results and the
// total must equal the number of documents written.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Fixed: pass pageSize instead of a hard-coded 1; the assertion below
        // compares page sizes against the same variable.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
// A container whose policy format version was forced to 2 in setup must be
// unusable: the create is expected to fail during encryption-policy fetch.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
// Exercises CRUD, query, and batch paths when client-side caches are stale:
// the database/container is repeatedly dropped and recreated under the SAME
// ids, and operations issued through the ORIGINAL proxy objects must still
// succeed once the caches refresh.
// Fixed: the request-charge assertion after replaceItem checked upsertResponse
// (copy/paste) -- it now checks replaceResponse.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    String databaseId = UUID.randomUUID().toString();
    try {
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);

        // Baseline create/query/read through the original proxy.
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Drop and recreate the whole database: a create through the now-stale
        // proxy must recover.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());

        // Recreate the container with a DIFFERENT (one-field) policy; a fresh
        // proxy writes with it, a raw (non-decrypting) read must show only
        // /sensitiveString encrypted, and the stale proxy must still decrypt.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Upsert through the stale proxy after another recreate.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Replace through the stale proxy after another recreate.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Paged query through the stale proxy over 10 freshly written docs.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);

        // Query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        // Plain literal; the original wrapped a no-argument String.format around it.
        query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive = " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong";
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }

        // Transactional batch through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch (Exception ignored) {
            // Best-effort cleanup; the database may already be gone.
        }
    }
}
// Creating a client encryption key with an unrecognised algorithm name must be
// rejected client-side with IllegalArgumentException before any network call
// could succeed.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
// Five-operation transactional batch (create, replace, upsert, read, delete)
// on a single partition key; verifies per-operation status codes and that the
// returned items decrypt to the expected payloads.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
// create -> 201; replace/upsert/read -> 200; delete -> 204.
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Same five-operation batch as batchExecution, but exercising the overloads
// that accept a CosmosBatchItemRequestOptions per operation.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
// create -> 201; replace/upsert/read -> 200; delete -> 204.
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts the one-field-policy shape: /sensitiveString must differ from the
// original (it stayed encrypted), while every other field round-trips intact.
// NOTE(review): assumes 'result' was read WITHOUT decryption against a
// container whose policy encrypts only /sensitiveString -- confirm at callers.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The single encrypted field: ciphertext must not equal the plaintext.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
/**
 * Builds an encryption-path list containing a single deterministic AEAD path
 * for {@code /sensitiveString}, keyed by "key1". Returns a mutable list.
 */
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
/**
 * Provisions a container (partition key {@code /mypk}) carrying the supplied
 * client-side encryption policy; blocks until creation completes.
 */
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
}
// Creates a fresh database and provisions two client encryption keys ("key1",
// "key2"), both wrapped through the test key-store provider; blocks on each step.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
/**
 * Builds a brand-new encryption container proxy backed by a freshly built async client,
 * so its internal caches are independent of the shared test client.
 *
 * @param databaseId  id of an existing database
 * @param containerId id of an existing container in that database
 * @return a new {@link CosmosEncryptionAsyncContainer} proxy
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    // Local client intentionally distinct from the test-level encryption client field.
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    return freshEncryptionClient
        .getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
} |
I'm curious if we have done any investigation into this approach of copying the `ByteBuffer` into a `byte[]`, and then wrapping this `byte[]` as a new `ByteBuffer`, and if there are better approaches? For example, there are various approaches detailed [here](https://stackoverflow.com/questions/3366925/deep-copy-duplicate-of-javas-bytebuffer). | public BufferedHttpResponse(HttpResponse innerHttpResponse) {
super(innerHttpResponse.getRequest());
this.innerHttpResponse = innerHttpResponse;
// Buffer the body: copy each chunk into a fresh byte[] (detaching it from the
// transport's buffer), track the running byte count, and cache() so the upstream
// body is consumed at most once no matter how many subscribers replay it.
this.cachedBody = innerHttpResponse.getBody()
.map(buffer -> {
cachedBodySize.addAndGet(buffer.remaining());
return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
})
.collectList()
.cache();
} | return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer)); | public BufferedHttpResponse(HttpResponse innerHttpResponse) {
super(innerHttpResponse.getRequest());
this.innerHttpResponse = innerHttpResponse;
// Buffer the body: copy each chunk into a fresh byte[] (detaching it from the
// transport's buffer), track the running byte count, and cache() so the upstream
// body is consumed at most once no matter how many subscribers replay it.
this.cachedBody = innerHttpResponse.getBody()
.map(buffer -> {
cachedBodySize.addAndGet(buffer.remaining());
return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
})
.collectList()
.cache();
} | class BufferedHttpResponse extends HttpResponse {
// Logger used when reporting bodies too large to materialize (see monoError usages below).
private final ClientLogger logger = new ClientLogger(BufferedHttpResponse.class);
// The wrapped response; status code and headers are delegated to it untouched.
private final HttpResponse innerHttpResponse;
// Replayable, lazily-populated copy of the response body chunks.
private final Mono<List<ByteBuffer>> cachedBody;
// Running total of buffered body bytes; used to size byte[]/String conversions.
private final AtomicLong cachedBodySize = new AtomicLong();
/**
* Creates a buffered HTTP response.
*
* @param innerHttpResponse The HTTP response to buffer
*/
@Override
public int getStatusCode() {
// Status is not buffered; delegate straight to the wrapped response.
return innerHttpResponse.getStatusCode();
}
@Override
public String getHeaderValue(String name) {
// Headers are not buffered; delegate straight to the wrapped response.
return innerHttpResponse.getHeaderValue(name);
}
@Override
public HttpHeaders getHeaders() {
// Headers are not buffered; delegate straight to the wrapped response.
return innerHttpResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
// Replay the cached chunks. duplicate() gives each subscriber an independent
// position/limit while sharing the underlying byte content.
return cachedBody.flatMapMany(Flux::fromIterable).map(ByteBuffer::duplicate);
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
    // Guard clause: a byte[] is int-indexed, so a body larger than Integer.MAX_VALUE
    // bytes cannot be materialized into one.
    if (cachedBodySize.get() > Integer.MAX_VALUE) {
        return monoError(logger, new IllegalStateException(
            "Response with body size " + cachedBodySize.get() + " doesn't fit into a byte array."));
    }
    return FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get());
}
@Override
public Mono<String> getBodyAsString() {
    // Guard clause: the body must first fit into a byte[] to become a String.
    if (cachedBodySize.get() > Integer.MAX_VALUE) {
        return monoError(logger, new IllegalStateException(
            "Response with body size " + cachedBodySize.get() + " doesn't fit into a String."));
    }
    // Decode BOM-aware, passing the Content-Type header value through to the decoder.
    return getBodyAsByteArray().map(bytes ->
        CoreUtils.bomAwareToString(bytes, innerHttpResponse.getHeaderValue("Content-Type")));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
    // Guard clause: the body must first fit into a byte[] to become a String.
    if (cachedBodySize.get() > Integer.MAX_VALUE) {
        return monoError(logger, new IllegalStateException(
            "Response with body size " + cachedBodySize.get() + " doesn't fit into a String."));
    }
    return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BufferedHttpResponse buffer() {
// Already buffered, so re-buffering would be wasted work.
return this;
}
} | class BufferedHttpResponse extends HttpResponse {
// Logger used when reporting bodies too large to materialize (see monoError usages below).
private final ClientLogger logger = new ClientLogger(BufferedHttpResponse.class);
// The wrapped response; status code and headers are delegated to it untouched.
private final HttpResponse innerHttpResponse;
// Replayable, lazily-populated copy of the response body chunks.
private final Mono<List<ByteBuffer>> cachedBody;
// Running total of buffered body bytes; used to size byte[]/String conversions.
private final AtomicLong cachedBodySize = new AtomicLong();
/**
* Creates a buffered HTTP response.
*
* @param innerHttpResponse The HTTP response to buffer
*/
@Override
public int getStatusCode() {
// Status is not buffered; delegate straight to the wrapped response.
return innerHttpResponse.getStatusCode();
}
@Override
public String getHeaderValue(String name) {
// Headers are not buffered; delegate straight to the wrapped response.
return innerHttpResponse.getHeaderValue(name);
}
@Override
public HttpHeaders getHeaders() {
// Headers are not buffered; delegate straight to the wrapped response.
return innerHttpResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
// Replay the cached chunks. duplicate() gives each subscriber an independent
// position/limit while sharing the underlying byte content.
return cachedBody.flatMapMany(Flux::fromIterable).map(ByteBuffer::duplicate);
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
    // Guard clause: a byte[] is int-indexed, so a body larger than Integer.MAX_VALUE
    // bytes cannot be materialized into one.
    if (cachedBodySize.get() > Integer.MAX_VALUE) {
        return monoError(logger, new IllegalStateException(
            "Response with body size " + cachedBodySize.get() + " doesn't fit into a byte array."));
    }
    return FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get());
}
@Override
public Mono<String> getBodyAsString() {
    // Guard clause: the body must first fit into a byte[] to become a String.
    if (cachedBodySize.get() > Integer.MAX_VALUE) {
        return monoError(logger, new IllegalStateException(
            "Response with body size " + cachedBodySize.get() + " doesn't fit into a String."));
    }
    // Decode BOM-aware, passing the Content-Type header value through to the decoder.
    return getBodyAsByteArray().map(bytes ->
        CoreUtils.bomAwareToString(bytes, innerHttpResponse.getHeaderValue("Content-Type")));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
    // Guard clause: the body must first fit into a byte[] to become a String.
    if (cachedBodySize.get() > Integer.MAX_VALUE) {
        return monoError(logger, new IllegalStateException(
            "Response with body size " + cachedBodySize.get() + " doesn't fit into a String."));
    }
    return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BufferedHttpResponse buffer() {
// Already buffered, so re-buffering would be wasted work.
return this;
}
} |
Can you please add tests for queries returning types like Integer, String etc | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
// Seed three items so the aggregates below have data to operate on.
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// Scalar aggregate (VALUE max(_ts)) deserialized as a JsonNode; exactly one row expected.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<JsonNode> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, JsonNode.class);
List<JsonNode> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0).asInt();
long endTime = Instant.now().getEpochSecond();
// The max _ts must fall between the test's start and end instants.
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
// TOP 1 + count aggregate; again a single scalar row expected.
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
} | CosmosPagedFlux<JsonNode> feedResponseIterator1 = | public void queryItemsAggregate() {
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
// Seed three items so the aggregates below have data to operate on.
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// Scalar aggregate (VALUE max(_ts)) deserialized directly as Integer.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
// The max _ts must fall between the test's start and end instants.
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
// TOP 1 + count aggregate deserialized as Integer; a single scalar row expected.
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
// String-valued aggregate over an encrypted path, deserialized as String.
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client owned by this test class; created in before_CosmosItemTest, closed in afterClass.
private CosmosAsyncClient client;
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container provisioned with policyFormatVersion 2 (forced via reflection in setup);
// exercised by incompatiblePolicyFormatVersion().
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one test instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guard against double initialization across TestNG lifecycles.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Force policyFormatVersion=2 via reflection so the created container uses a policy
// version this client library does not support (see incompatiblePolicyFormatVersion()).
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
// Setup must have run; release the client created in before_CosmosItemTest.
assertThat(this.client).isNotNull();
this.client.close();
}
/**
 * Creates an item through the encryption container and verifies the read-back decrypts
 * to the original values, including a second round with a 10,000-character sensitive string.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Round-trip: reading the item back must decrypt to the original values.
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    // Large-payload round: build the 10k string with StringBuilder instead of repeated
    // String concatenation (which is O(n^2) over the loop and allocates 10k throwaway strings).
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
// With content-response-on-write disabled, the create succeeds (charge > 0)
// but the response body carries no item payload.
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
// Upsert (insert path) must encrypt on write and decrypt on read, same as create.
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
// Query by (plaintext) id; results must come back fully decrypted.
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
/**
 * Queries with parameters bound to deterministically encrypted paths
 * ({@code /sensitiveString}, {@code /sensitiveLong}) via {@code SqlQuerySpecWithEncryption}.
 * Fix: the {@code @Test} annotation was duplicated on consecutive lines; {@code @Test}
 * is not a repeatable annotation, so the duplicate does not compile and was removed.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // The non-sensitive parameter is added directly; the sensitive ones are registered
    // through SqlQuerySpecWithEncryption so they get encrypted before the query is sent.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
/**
 * Verifies that binding a query parameter to a RANDOMIZED-encrypted path
 * ({@code /sensitiveDouble}) is rejected with an IllegalArgumentException.
 * Fix: the result of the query execution was previously captured in an unused local;
 * the expression is now executed directly since only the thrown exception matters.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Execution itself must throw; the results are deliberately discarded.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
/**
 * Drains a three-item query one page at a time via continuation tokens and asserts
 * every page honors the requested page size and all documents are eventually returned.
 * Fix: byPage() previously received the literal {@code 1} while the surrounding code
 * declared and asserted against {@code pageSize}; the variable is now used so the
 * request and the assertion cannot drift apart.
 */
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Request pageSize items per page, resuming from the previous continuation token.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
// Any CRUD against the version-2-policy container (provisioned in setup) must be
// rejected by this client library version.
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
// Phase 1: fresh database + keys + container; create, query and read an item back.
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Phase 2: drop and recreate the database under the same id, leaving the original
// container proxy's caches stale; create must still succeed through it.
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
// Phase 3: recreate the container with a narrower (one-field) policy; a new proxy
// sees one encrypted field, while the stale original proxy must still read correctly.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
// Phase 4: recreate the container and upsert through the stale original proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Phase 5: recreate again and replace through the stale original proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
// NOTE(review): this re-asserts upsertResponse from the previous phase;
// replaceResponse.getRequestCharge() was probably intended — confirm.
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
// Phase 6: recreate, seed 10 items via a new proxy, then page through a full query
// with the stale original proxy using continuation tokens.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
// Phase 7: recreate and run an encrypted-parameter query through the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
// Phase 8: recreate and execute a transactional batch through the stale proxy.
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
// Best-effort cleanup of the throwaway database; failures here must not mask the test result.
try {
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
// Creating a client encryption key with an unrecognized algorithm name must be
// rejected client-side with IllegalArgumentException.
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath();
includedPath.setClientEncryptionKeyId("key1");
includedPath.setPath("/sensitiveString");
includedPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
List<ClientEncryptionIncludedPath> paths = new ArrayList<>();
paths.add(includedPath);
return paths;
}
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
CosmosAsyncClient client = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(client,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(client.getDatabase(databaseId));
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer = cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
return cosmosEncryptionAsyncContainer;
}
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
private CosmosAsyncClient client;
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
properties = getItem(UUID.randomUUID().toString());
String longString = "";
for (int i = 0; i < 10000; i++) {
longString += "a";
}
properties.setSensitiveString(longString);
itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
requestOptions.setContentResponseOnWriteEnabled(false);
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), requestOptions).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
assertThat(itemResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.upsertItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(properties, readItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
SqlQuerySpec querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
SqlQuerySpec querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
cosmosQueryRequestOptions, EncryptionPojo.class);
try {
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
fail("Query on randomized parameter should fail");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
"query because of randomized encryption");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
List<String> actualIds = new ArrayList<>();
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
actualIds.get(1), actualIds.get(2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
String continuationToken = null;
int pageSize = 1;
int initialDocumentCount = 3;
int finalDocumentCount = 0;
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, 1).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
String databaseId = UUID.randomUUID().toString();
try {
createNewDatabaseWithClientEncryptionKey(databaseId);
CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
String containerId = UUID.randomUUID().toString();
ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
createNewDatabaseWithClientEncryptionKey(databaseId);
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
validateResponse(encryptionPojo, createResponse.getItem());
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
validateResponse(encryptionPojo, readItem);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = upsertResponse.getItem();
validateResponse(encryptionPojo, responseItem);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
encryptionAsyncContainerNew.createItem(encryptionPojo,
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
CosmosItemResponse<EncryptionPojo> replaceResponse =
encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
responseItem = replaceResponse.getItem();
validateResponse(encryptionPojo, responseItem);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
for (int i = 0; i < 10; i++) {
EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(pojo,
new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
}
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
String continuationToken = null;
int pageSize = 3;
int finalDocumentCount = 0;
do {
Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
int resultSize = fr.getResults().size();
assertThat(resultSize).isLessThanOrEqualTo(pageSize);
finalDocumentCount += fr.getResults().size();
continuationToken = fr.getContinuationToken();
}
} while (continuationToken != null);
assertThat(finalDocumentCount).isEqualTo(10);
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveLong = @sensitiveLong");
querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
feedResponseIterator =
encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
null, EncryptionPojo.class);
feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
}
}
encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(2);
validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
} finally {
try {
this.client.getDatabase(databaseId).delete().block();
} catch(Exception ex) {
}
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
CosmosBatchItemRequestOptions cosmosBatchItemRequestOptions = new CosmosBatchItemRequestOptions();
cosmosBatch.createItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.replaceItemOperation(itemId, replacePojo,cosmosBatchItemRequestOptions);
cosmosBatch.upsertItemOperation(createPojo, cosmosBatchItemRequestOptions);
cosmosBatch.readItemOperation(itemId, cosmosBatchItemRequestOptions);
cosmosBatch.deleteItemOperation(itemId, cosmosBatchItemRequestOptions);
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
    // Compares a raw (undecrypted) read-back against the original when only
    // /sensitiveString is covered by the encryption policy: that one field must NOT
    // match the plaintext, while every other field must round-trip unchanged.
    EncryptionPojo expected = originalItem;
    EncryptionPojo actual = result;
    assertThat(actual.getId()).isEqualTo(expected.getId());
    assertThat(actual.getNonSensitive()).isEqualTo(expected.getNonSensitive());
    // The encrypted field should not come back as plaintext.
    assertThat(actual.getSensitiveString()).isNotEqualTo(expected.getSensitiveString());
    assertThat(actual.getSensitiveInt()).isEqualTo(expected.getSensitiveInt());
    assertThat(actual.getSensitiveFloat()).isEqualTo(expected.getSensitiveFloat());
    assertThat(actual.getSensitiveLong()).isEqualTo(expected.getSensitiveLong());
    assertThat(actual.getSensitiveDouble()).isEqualTo(expected.getSensitiveDouble());
    assertThat(actual.isSensitiveBoolean()).isEqualTo(expected.isSensitiveBoolean());
    assertThat(actual.getSensitiveIntArray()).isEqualTo(expected.getSensitiveIntArray());
    assertThat(actual.getSensitiveStringArray()).isEqualTo(expected.getSensitiveStringArray());
    assertThat(actual.getSensitiveString3DArray()).isEqualTo(expected.getSensitiveString3DArray());
}
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    // Policy fragment that deterministically encrypts /sensitiveString only,
    // wrapped by the client encryption key "key1".
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
                                       ClientEncryptionPolicy clientEncryptionPolicy,
                                       String containerId) {
    // Provisions a container partitioned on /mypk carrying the supplied client
    // encryption policy (blocks until creation completes).
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
    cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
}
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
    // Provisions a fresh database and seeds it with two client encryption keys
    // ("key1" and "key2"), both using AEAD_AES_256_CBC_HMAC_SHA256.
    TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
    CosmosEncryptionAsyncDatabase encryptionDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
    EncryptionKeyWrapMetadata keyMetadata1 = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
    encryptionDatabase.createClientEncryptionKey("key1",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, keyMetadata1).block();
    EncryptionKeyWrapMetadata keyMetadata2 = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key2", "tempmetadata2");
    encryptionDatabase.createClientEncryptionKey("key2",
        CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, keyMetadata2).block();
}
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    // Builds an entirely separate client stack and returns a container proxy from it
    // (used by the stale-cache tests). Local names avoid shadowing the class fields.
    CosmosAsyncClient freshClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    CosmosEncryptionAsyncClient freshEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(freshClient, keyStoreProvider);
    CosmosEncryptionAsyncDatabase encryptionDatabase =
        freshEncryptionClient.getCosmosEncryptionAsyncDatabase(freshClient.getDatabase(databaseId));
    return encryptionDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
} |
I have added test cases for Integer and String. | public void queryItemsAggregate() {
// Aggregate queries over encrypted items: VALUE MAX(c._ts) and TOP 1 VALUE COUNT(c).
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
// Seed three items so the aggregates have data to operate on.
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// MAX over the server-maintained _ts; result is a single scalar, read here as JsonNode.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<JsonNode> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, JsonNode.class);
List<JsonNode> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0).asInt();
long endTime = Instant.now().getEpochSecond();
// The newest write timestamp must fall within the test's own time window.
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
// COUNT aggregate; NOTE(review): the scalar is requested as EncryptionPojo here —
// only the result-list size is asserted, the value itself is never inspected.
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, EncryptionPojo.class);
List<EncryptionPojo> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
} | CosmosPagedFlux<JsonNode> feedResponseIterator1 = | public void queryItemsAggregate() {
// Aggregate queries over encrypted items, with scalar results deserialized to the
// natural types (Integer for MAX/COUNT, String for MAX over a string property).
long startTime = Instant.now().getEpochSecond();
List<String> actualIds = new ArrayList<>();
// Seed three items so the aggregates have data to operate on.
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
properties = getItem(UUID.randomUUID().toString());
cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
new CosmosItemRequestOptions()).block();
actualIds.add(properties.getId());
// MAX over the server-maintained _ts, deserialized directly as Integer.
String query1 = String.format("Select value max(c._ts) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions1 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec1 = new SqlQuerySpec(query1);
CosmosPagedFlux<Integer> feedResponseIterator1 =
cosmosEncryptionAsyncContainer.queryItems(querySpec1, cosmosQueryRequestOptions1, Integer.class);
List<Integer> feedResponse1 = feedResponseIterator1.byPage().blockFirst().getResults();
int timeStamp = feedResponse1.get(0);
long endTime = Instant.now().getEpochSecond();
// The newest write timestamp must fall within the test's own time window.
assertThat(timeStamp).isGreaterThanOrEqualTo((int)startTime);
assertThat(timeStamp).isLessThanOrEqualTo((int)endTime);
assertThat(feedResponse1.size()).isEqualTo(1);
// COUNT aggregate deserialized as Integer.
String query2 = String.format("Select top 1 value count(c) from c order by c._ts");
CosmosQueryRequestOptions cosmosQueryRequestOptions2 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec2 = new SqlQuerySpec(query2);
CosmosPagedFlux<Integer> feedResponseIterator2 =
cosmosEncryptionAsyncContainer.queryItems(querySpec2, cosmosQueryRequestOptions2, Integer.class);
List<Integer> feedResponse2 = feedResponseIterator2.byPage().blockFirst().getResults();
assertThat(feedResponse2.size()).isEqualTo(1);
// MAX over an encrypted string property, deserialized as String.
String query3 = String.format("Select value max(c.sensitiveString) from c");
CosmosQueryRequestOptions cosmosQueryRequestOptions3 = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec3 = new SqlQuerySpec(query3);
CosmosPagedFlux<String> feedResponseIterator3 =
cosmosEncryptionAsyncContainer.queryItems(querySpec3, cosmosQueryRequestOptions3, String.class);
List<String> feedResponse3 = feedResponseIterator3.byPage().blockFirst().getResults();
assertThat(feedResponse3.size()).isEqualTo(1);
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw async client built in before_CosmosItemTest and closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around the raw client.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container whose policy was forced (via reflection) to policyFormatVersion 2,
// used to exercise incompatible-version handling.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests; package-private so
// suite helpers can reach them.
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: the suite is instantiated once per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the raw and encryption clients, resolves the shared
// database/container, and creates an extra container whose encryption policy is
// forced to format version 2 for the incompatible-version test.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guard against double initialization across factory instances.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Reflection is needed because the public API does not expose the policy format version.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Closes the raw async client created in before_CosmosItemTest; alwaysRun ensures
// cleanup happens even when earlier tests in the class failed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    // Round-trip: create an item through the encryption container, then read it back
    // and verify every field decrypts to the original value.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);
    // Large-payload variant: a 10,000-character sensitive string.
    // FIX: build the string with StringBuilder instead of the original O(n^2)
    // String += concatenation in a loop.
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longStringBuilder = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longStringBuilder.append('a');
    }
    properties.setSensitiveString(longStringBuilder.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    // With contentResponseOnWriteEnabled=false the create still succeeds (charge > 0)
    // but the service returns no item payload in the response.
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemRequestOptions writeOptions = new CosmosItemRequestOptions();
    writeOptions.setContentResponseOnWriteEnabled(false);
    CosmosItemResponse<EncryptionPojo> createResponse = cosmosEncryptionAsyncContainer
        .createItem(pojo, new PartitionKey(pojo.getMypk()), writeOptions).block();
    assertThat(createResponse.getRequestCharge()).isGreaterThan(0);
    assertThat(createResponse.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    // Upsert a fresh item, then read it back; both payloads must decrypt to the original.
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    PartitionKey partitionKey = new PartitionKey(pojo.getMypk());
    CosmosItemResponse<EncryptionPojo> upsertResponse = cosmosEncryptionAsyncContainer.upsertItem(pojo,
        partitionKey, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateResponse(pojo, upsertResponse.getItem());
    EncryptionPojo fetched = cosmosEncryptionAsyncContainer.readItem(pojo.getId(), partitionKey,
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(pojo, fetched);
}
// Creates one item, then queries it back by id and checks the decrypted payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
// NOTE(review): blockFirst() is nullable; the test relies on at least one page existing.
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
// Only the item created above is validated; other rows in the page are ignored.
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
// FIX: the original carried two identical @Test annotations on this method; @Test is
// not a repeatable annotation, so the duplicate has been removed.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // Queries with parameters bound to encrypted paths via SqlQuerySpecWithEncryption,
    // so the client can encrypt the parameter values before sending the query.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong");
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    // Plain parameter goes on the query spec; encrypted parameters are registered
    // against their encrypted paths instead.
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
// A query parameter bound to a RANDOMIZED-encrypted path (/sensitiveDouble) must be
// rejected: randomized ciphertexts are not equality-comparable.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
String query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
" " +
"@nonSensitive and c.sensitiveDouble = @sensitiveDouble");
SqlQuerySpec querySpec = new SqlQuerySpec(query);
SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
querySpec.getParameters().add(parameter1);
SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
cosmosQueryRequestOptions, EncryptionPojo.class);
// The failure surfaces only on execution (blockFirst), not when building the flux.
try {
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
fail("Query on randomized parameter should fail");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
"query because of randomized encryption");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Inserts three items, then drains a query one page at a time via continuation
    // tokens, asserting every page holds exactly pageSize items and no item is lost.
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // FIX: request pageSize items per page instead of the hard-coded literal 1,
        // keeping the requested size consistent with the per-page assertion below.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// A container whose policy format version is 2 (set via reflection in setup) should be
// rejected by this library, which only supports version <= 1.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    // Exercises CRUD, query, and batch operations through a container proxy whose
    // backing database/container is repeatedly deleted and re-created, so cached
    // client-side encryption settings are stale at each step.
    String databaseId = UUID.randomUUID().toString();
    try {
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);

        // Baseline create/query/read against the freshly created container.
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Drop and re-create the entire database; the old proxy must still create items.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());

        // Re-create the container with a one-field policy; write through a new proxy,
        // then read raw (only /sensitiveString stays encrypted) and via the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Upsert through the stale proxy after another container re-creation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Replace through the stale proxy after another container re-creation.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // BUG FIX: this assertion previously re-checked upsertResponse (copy-paste);
        // the replace response produced by this step is the one that must be validated.
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Page through a query issued by the stale proxy over items written by a new proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);

        // Query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong");
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }

        // Batch execution through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId= UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch(Exception ex) {
            // Best-effort cleanup: the database may already be gone if the test deleted it.
        }
    }
}
// Creating a client encryption key with an unrecognized algorithm name must be
// rejected client-side with IllegalArgumentException.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
// Verifies a transactional batch (create/replace/upsert/read/delete) using the
// options-free operation overloads round-trips encrypted items.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
// All five operations target one logical item; the id doubles as the partition key value.
String itemId= UUID.randomUUID().toString();
EncryptionPojo createPojo = getItem(itemId);
EncryptionPojo replacePojo = getItem(itemId);
replacePojo.setSensitiveString("ReplacedSensitiveString");
CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
cosmosEncryptionBatch.createItemOperation(createPojo);
cosmosEncryptionBatch.replaceItemOperation(itemId, replacePojo);
cosmosEncryptionBatch.upsertItemOperation(createPojo);
cosmosEncryptionBatch.readItemOperation(itemId);
cosmosEncryptionBatch.deleteItemOperation(itemId);
// NOTE(review): block() is nullable per Reactor's contract; the test relies on success here.
CosmosBatchResponse batchResponse = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(cosmosEncryptionBatch).block();
assertThat(batchResponse.getResults().size()).isEqualTo(5);
// Per-operation status codes, in submission order (delete returns 204 No Content).
assertThat(batchResponse.getResults().get(0).getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
assertThat(batchResponse.getResults().get(1).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(2).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(3).getStatusCode()).isEqualTo(HttpResponseStatus.OK.code());
assertThat(batchResponse.getResults().get(4).getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code());
// Decrypted payloads must match what each operation wrote; index 3 is the read-back
// of the upserted createPojo.
validateResponse(batchResponse.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
validateResponse(batchResponse.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
validateResponse(batchResponse.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    // Same transactional-batch round trip as batchExecution, but every operation is added
    // through the overloads that accept CosmosBatchItemRequestOptions.
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");

    CosmosBatchItemRequestOptions perItemOptions = new CosmosBatchItemRequestOptions();
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createPojo, perItemOptions);
    batch.replaceItemOperation(itemId, replacePojo, perItemOptions);
    batch.upsertItemOperation(createPojo, perItemOptions);
    batch.readItemOperation(itemId, perItemOptions);
    batch.deleteItemOperation(itemId, perItemOptions);

    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(response.getResults().size()).isEqualTo(5);

    // Expected status codes, in operation order (delete returns 204 with no body).
    int[] expectedStatus = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatus.length; i++) {
        assertThat(response.getResults().get(i).getStatusCode()).isEqualTo(expectedStatus[i]);
    }

    // Validate the decrypted payloads of the four operations that return a body.
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts the "single encrypted field" shape: /sensitiveString must NOT round-trip to the
// original plaintext (it is expected to still be encrypted in `result`), while every other
// field must match the original exactly.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The one encrypted property: must differ from the raw value.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
    // Builds a client-encryption policy covering exactly one property, /sensitiveString,
    // using deterministic encryption under "key1".
    ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
    sensitiveStringPath.setClientEncryptionKeyId("key1");
    sensitiveStringPath.setPath("/sensitiveString");
    sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
    sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);

    List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(sensitiveStringPath);
    return includedPaths;
}
// Creates a container (partition key /mypk) in the given database with the supplied
// client-encryption policy attached, blocking until the container exists.
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
}
// Creates a fresh database and provisions two client encryption keys in it
// ("key1"/"key2", AEAD_AES_256_CBC_HMAC_SHA256) backed by the test key-store provider.
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata1 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata metadata2 = new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionAsyncDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionAsyncDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata1).block();
encryptionAsyncDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, metadata2).block();
}
// Builds a brand-new async client + encryption client and returns a container proxy whose
// encryption-settings caches are cold — used to simulate a second, independent consumer of
// the same container.
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
    CosmosAsyncClient newClient = getClientBuilder().buildAsyncClient();
    EncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
    // Renamed from "cosmosEncryptionAsyncClient" so the local no longer shadows the
    // class field of the same name.
    CosmosEncryptionAsyncClient newEncryptionClient =
        CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(newClient, keyStoreProvider);
    // Return directly instead of going through two single-use temporaries.
    return newEncryptionClient
        .getCosmosEncryptionAsyncDatabase(newClient.getDatabase(databaseId))
        .getCosmosEncryptionAsyncContainer(containerId);
}
} | class EncryptionAsyncApiCrudTest extends TestSuiteBase {
// Raw (non-encrypting) async client; created in before_CosmosItemTest, closed in afterClass.
private CosmosAsyncClient client;
// Encryption-aware wrapper around {@link #client}.
private CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient;
// Container created with policyFormatVersion forced to 2, to exercise incompatibility handling.
private CosmosEncryptionAsyncContainer encryptionContainerWithIncompatiblePolicyVersion;
// Shared encryption container/database used by most tests (package-private on purpose).
CosmosEncryptionAsyncContainer cosmosEncryptionAsyncContainer;
CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase;
// TestNG factory constructor: one test instance per configured client builder.
@Factory(dataProvider = "clientBuilders")
public EncryptionAsyncApiCrudTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
// One-time setup: builds the async client, wraps it in an encryption client, resolves the
// shared encryption database/container, and creates an extra container whose client
// encryption policy is forced (via reflection) to format version 2 for the
// incompatiblePolicyFormatVersion test.
@BeforeClass(groups = {"encryption"}, timeOut = SETUP_TIMEOUT)
public void before_CosmosItemTest() {
// Guard against double-initialization across factory instances.
assertThat(this.client).isNull();
this.client = getClientBuilder().buildAsyncClient();
TestEncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(this.client,
encryptionKeyStoreProvider);
cosmosEncryptionAsyncDatabase = getSharedEncryptionDatabase(cosmosEncryptionAsyncClient);
cosmosEncryptionAsyncContainer = getSharedEncryptionContainer(cosmosEncryptionAsyncClient);
// Policy format version 2 is not settable through the public API; forced via reflection.
ClientEncryptionPolicy clientEncryptionWithPolicyFormatVersion2 = new ClientEncryptionPolicy(getPaths());
ReflectionUtils.setPolicyFormatVersion(clientEncryptionWithPolicyFormatVersion2, 2);
String containerId = UUID.randomUUID().toString();
CosmosContainerProperties properties = new CosmosContainerProperties(containerId, "/mypk");
properties.setClientEncryptionPolicy(clientEncryptionWithPolicyFormatVersion2);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(properties).block();
encryptionContainerWithIncompatiblePolicyVersion =
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
// Tear-down: closes the client created in before_CosmosItemTest. alwaysRun so cleanup
// happens even when setup or a test failed.
@AfterClass(groups = {"encryption"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.client).isNotNull();
this.client.close();
}
@Test(groups = {"encryption"}, priority = 1, timeOut = TIMEOUT)
public void createItemEncrypt_readItemDecrypt() {
    // Round trip: create through the encryption container, then read back and verify the
    // decrypted item matches what was written.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    EncryptionPojo readItem = cosmosEncryptionAsyncContainer.readItem(properties.getId(), new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(properties, readItem);

    // Second create with a 10,000-char sensitive value. Built with StringBuilder:
    // the previous `longString += "a"` loop re-allocated the whole string every
    // iteration, i.e. O(n^2) work for the same result.
    properties = getItem(UUID.randomUUID().toString());
    StringBuilder longString = new StringBuilder(10000);
    for (int i = 0; i < 10000; i++) {
        longString.append('a');
    }
    properties.setSensitiveString(longString.toString());
    itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void createItemEncryptWithContentResponseOnWriteEnabledFalse() {
    // With content-response-on-write disabled, the create must succeed (positive RU charge)
    // but the response body must be empty.
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    CosmosItemRequestOptions writeOptions = new CosmosItemRequestOptions();
    writeOptions.setContentResponseOnWriteEnabled(false);

    CosmosItemResponse<EncryptionPojo> response = cosmosEncryptionAsyncContainer.createItem(pojo,
        new PartitionKey(pojo.getMypk()), writeOptions).block();

    assertThat(response.getRequestCharge()).isGreaterThan(0);
    assertThat(response.getItem()).isNull();
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void upsertItem_readItem() {
    // Upsert of a brand-new id (acts as create), then read back and verify decryption.
    EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
    PartitionKey partitionKey = new PartitionKey(pojo.getMypk());

    CosmosItemResponse<EncryptionPojo> upsertResponse = cosmosEncryptionAsyncContainer.upsertItem(pojo,
        partitionKey, new CosmosItemRequestOptions()).block();
    assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
    validateResponse(pojo, upsertResponse.getItem());

    EncryptionPojo fetched = cosmosEncryptionAsyncContainer.readItem(pojo.getId(), partitionKey,
        new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
    validateResponse(pojo, fetched);
}
// Creates an item, then queries for it by id through the encryption container and verifies
// each matching result decrypts to the created payload.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItems() {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
EncryptionPojo responseItem = itemResponse.getItem();
validateResponse(properties, responseItem);
// id is not an encrypted path, so a plain parameterless query works here.
String query = String.format("SELECT * from c where c.id = '%s'", properties.getId());
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
SqlQuerySpec querySpec = new SqlQuerySpec(query);
CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
cosmosEncryptionAsyncContainer.queryItems(querySpec, cosmosQueryRequestOptions, EncryptionPojo.class);
List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
for (EncryptionPojo pojo : feedResponse) {
if (pojo.getId().equals(properties.getId())) {
validateResponse(pojo, responseItem);
}
}
}
// NOTE: this method previously carried two identical @Test annotations; TestNG's @Test is
// not a repeatable annotation, so the duplicate does not compile — only one is kept.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnEncryptedProperties() {
    // Creates an item, then queries on encrypted properties via SqlQuerySpecWithEncryption,
    // registering the parameters that target encrypted paths so the SDK can process them.
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal: the former String.format(...) call had no format specifiers or arguments.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveLong = @sensitiveLong";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    // Parameters bound to encrypted paths go through addEncryptionParameter, not the plain
    // parameter list.
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveLong", properties.getSensitiveLong());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
    assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
    for (EncryptionPojo pojo : feedResponse) {
        if (pojo.getId().equals(properties.getId())) {
            validateResponse(pojo, responseItem);
        }
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsOnRandomizedEncryption() {
    // Negative test: querying on /sensitiveDouble must be rejected with
    // IllegalArgumentException because that path uses randomized encryption
    // (per the asserted error message below).
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    CosmosItemResponse<EncryptionPojo> itemResponse = cosmosEncryptionAsyncContainer.createItem(properties,
        new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
    assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
    EncryptionPojo responseItem = itemResponse.getItem();
    validateResponse(properties, responseItem);
    // Plain literal: the former String.format(...) call had no format specifiers or arguments.
    String query = "SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
        " " +
        "@nonSensitive and c.sensitiveDouble = @sensitiveDouble";
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    SqlParameter parameter1 = new SqlParameter("@nonSensitive", properties.getNonSensitive());
    querySpec.getParameters().add(parameter1);
    SqlParameter parameter2 = new SqlParameter("@sensitiveString", properties.getSensitiveString());
    SqlParameter parameter3 = new SqlParameter("@sensitiveDouble", properties.getSensitiveDouble());
    SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
    sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveDouble", parameter3);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
            cosmosQueryRequestOptions, EncryptionPojo.class);
    try {
        // Execution (not pipeline construction) is expected to throw. The previous
        // dead-store assignment of the result to an unused local has been removed.
        feedResponseIterator.byPage().blockFirst().getResults();
        fail("Query on randomized parameter should fail");
    } catch (IllegalArgumentException ex) {
        assertThat(ex.getMessage()).contains("Path /sensitiveDouble cannot be used in the " +
            "query because of randomized encryption");
    }
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void queryItemsWithContinuationTokenAndPageSize() {
    // Creates three items, then drains a query over them one page at a time using
    // continuation tokens, asserting every page holds exactly pageSize results and the
    // total equals the number of inserted documents.
    List<String> actualIds = new ArrayList<>();
    EncryptionPojo properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());
    properties = getItem(UUID.randomUUID().toString());
    cosmosEncryptionAsyncContainer.createItem(properties, new PartitionKey(properties.getMypk()),
        new CosmosItemRequestOptions()).block();
    actualIds.add(properties.getId());

    String query = String.format("SELECT * from c where c.id in ('%s', '%s', '%s')", actualIds.get(0),
        actualIds.get(1), actualIds.get(2));
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    String continuationToken = null;
    int pageSize = 1;
    int initialDocumentCount = 3;
    int finalDocumentCount = 0;
    CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
        cosmosEncryptionAsyncContainer.queryItems(query, cosmosQueryRequestOptions, EncryptionPojo.class);
    do {
        // Pass the pageSize variable (was a hard-coded literal 1) so the requested page
        // size and the per-page assertion below cannot drift apart.
        Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
            feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
        for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
            int resultSize = fr.getResults().size();
            assertThat(resultSize).isEqualTo(pageSize);
            finalDocumentCount += fr.getResults().size();
            continuationToken = fr.getContinuationToken();
        }
    } while (continuationToken != null);
    assertThat(finalDocumentCount).isEqualTo(initialDocumentCount);
}
// Negative test: a container whose client encryption policy was forced to format version 2
// (see before_CosmosItemTest) must make CRUD fail with UnsupportedOperationException.
// Currently @Ignore'd because the server reports policyFormatVersion 0 regardless.
@Ignore("Ignoring it temporarily because server always returning policyFormatVersion 0")
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void incompatiblePolicyFormatVersion() {
try {
EncryptionPojo properties = getItem(UUID.randomUUID().toString());
encryptionContainerWithIncompatiblePolicyVersion.createItem(properties,
new PartitionKey(properties.getMypk()), new CosmosItemRequestOptions()).block();
fail("encryptionContainerWithIncompatiblePolicyVersion crud operation should fail on client encryption " +
"policy " +
"fetch because of policy format version greater than 1");
} catch (UnsupportedOperationException ex) {
assertThat(ex.getMessage()).isEqualTo("This version of the Encryption library cannot be used with this " +
"container. Please upgrade to the latest version of the same.");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void crudQueryStaleCache() {
    // Verifies that a container proxy whose encryption-settings cache has gone stale
    // (because the backing database/container was deleted and recreated behind its back)
    // still performs create/read/query/upsert/replace/batch correctly once it refreshes.
    String databaseId = UUID.randomUUID().toString();
    try {
        createNewDatabaseWithClientEncryptionKey(databaseId);
        CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient();
        EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
        CosmosEncryptionAsyncClient cosmosEncryptionAsyncClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(asyncClient,
            encryptionKeyStoreProvider);
        CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase =
            cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(asyncClient.getDatabase(databaseId));
        String containerId = UUID.randomUUID().toString();
        ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(getPaths());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerOriginal =
            cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(containerId);

        // Warm the proxy's caches with a create/query/read round trip.
        EncryptionPojo encryptionPojo = getItem(UUID.randomUUID().toString());
        CosmosItemResponse<EncryptionPojo> createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());
        String query = String.format("SELECT * from c where c.id = '%s'", encryptionPojo.getId());
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedFlux<EncryptionPojo> feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems(querySpec, null, EncryptionPojo.class);
        List<EncryptionPojo> feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        EncryptionPojo readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(),
            new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Delete + recreate the database and container: the original proxy's cache is now stale;
        // a create through it must still succeed.
        cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().delete().block();
        createNewDatabaseWithClientEncryptionKey(databaseId);
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        createResponse = encryptionAsyncContainerOriginal.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        validateResponse(encryptionPojo, createResponse.getItem());

        // Recreate the container with a DIFFERENT policy (one encrypted field) and write via a
        // fresh proxy; the stale original proxy must still read/decrypt correctly.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        ClientEncryptionPolicy policyWithOneEncryptionPolicy = new ClientEncryptionPolicy(getPathWithOneEncryptionField());
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, policyWithOneEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // Raw (non-decrypting) read: only /sensitiveString should still be ciphertext.
        EncryptionPojo pojoWithOneFieldEncrypted = encryptionAsyncContainerNew.getCosmosAsyncContainer().readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponseWithOneFieldEncryption(encryptionPojo, pojoWithOneFieldEncrypted);
        readItem = encryptionAsyncContainerOriginal.readItem(encryptionPojo.getId(), new PartitionKey(encryptionPojo.getMypk()),
            new CosmosItemRequestOptions(), EncryptionPojo.class).block().getItem();
        validateResponse(encryptionPojo, readItem);

        // Upsert through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosItemResponse<EncryptionPojo> upsertResponse = encryptionAsyncContainerOriginal.upsertItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        assertThat(upsertResponse.getRequestCharge()).isGreaterThan(0);
        EncryptionPojo responseItem = upsertResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Replace through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        encryptionAsyncContainerNew = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        encryptionAsyncContainerNew.createItem(encryptionPojo,
            new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        CosmosItemResponse<EncryptionPojo> replaceResponse =
            encryptionAsyncContainerOriginal.replaceItem(encryptionPojo, encryptionPojo.getId(),
                new PartitionKey(encryptionPojo.getMypk()), new CosmosItemRequestOptions()).block();
        // BUGFIX: this previously re-asserted upsertResponse.getRequestCharge() (copy/paste);
        // the replace response is the one under test here.
        assertThat(replaceResponse.getRequestCharge()).isGreaterThan(0);
        responseItem = replaceResponse.getItem();
        validateResponse(encryptionPojo, responseItem);

        // Paged query through the stale proxy over 10 freshly-written documents.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        CosmosEncryptionAsyncContainer newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        for (int i = 0; i < 10; i++) {
            EncryptionPojo pojo = getItem(UUID.randomUUID().toString());
            newEncryptionAsyncContainer.createItem(pojo,
                new PartitionKey(pojo.getMypk()), new CosmosItemRequestOptions()).block();
        }
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItems("Select * from C", null, EncryptionPojo.class);
        String continuationToken = null;
        int pageSize = 3;
        int finalDocumentCount = 0;
        do {
            Iterable<FeedResponse<EncryptionPojo>> feedResponseIterable =
                feedResponseIterator.byPage(continuationToken, pageSize).toIterable();
            for (FeedResponse<EncryptionPojo> fr : feedResponseIterable) {
                int resultSize = fr.getResults().size();
                assertThat(resultSize).isLessThanOrEqualTo(pageSize);
                finalDocumentCount += fr.getResults().size();
                continuationToken = fr.getContinuationToken();
            }
        } while (continuationToken != null);
        assertThat(finalDocumentCount).isEqualTo(10);

        // Query on encrypted properties through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        newEncryptionAsyncContainer = getNewEncryptionContainerProxyObject(cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().getId(), containerId);
        EncryptionPojo encryptionPojoForQueryItemsOnEncryptedProperties = getItem(UUID.randomUUID().toString());
        newEncryptionAsyncContainer.createItem(encryptionPojoForQueryItemsOnEncryptedProperties,
            new PartitionKey(encryptionPojoForQueryItemsOnEncryptedProperties.getMypk()), new CosmosItemRequestOptions()).block();
        query = String.format("SELECT * FROM c where c.sensitiveString = @sensitiveString and c.nonSensitive =" +
            " " +
            "@nonSensitive and c.sensitiveLong = @sensitiveLong");
        querySpec = new SqlQuerySpec(query);
        SqlParameter parameter1 = new SqlParameter("@nonSensitive", encryptionPojoForQueryItemsOnEncryptedProperties.getNonSensitive());
        querySpec.getParameters().add(parameter1);
        SqlParameter parameter2 = new SqlParameter("@sensitiveString", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveString());
        SqlParameter parameter3 = new SqlParameter("@sensitiveLong", encryptionPojoForQueryItemsOnEncryptedProperties.getSensitiveLong());
        SqlQuerySpecWithEncryption sqlQuerySpecWithEncryption = new SqlQuerySpecWithEncryption(querySpec);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveString", parameter2);
        sqlQuerySpecWithEncryption.addEncryptionParameter("/sensitiveLong", parameter3);
        feedResponseIterator =
            encryptionAsyncContainerOriginal.queryItemsOnEncryptedProperties(sqlQuerySpecWithEncryption,
                null, EncryptionPojo.class);
        feedResponse = feedResponseIterator.byPage().blockFirst().getResults();
        assertThat(feedResponse.size()).isGreaterThanOrEqualTo(1);
        for (EncryptionPojo pojo : feedResponse) {
            if (pojo.getId().equals(encryptionPojoForQueryItemsOnEncryptedProperties.getId())) {
                validateResponse(encryptionPojoForQueryItemsOnEncryptedProperties, pojo);
            }
        }

        // Transactional batch through the stale proxy.
        encryptionAsyncContainerOriginal.getCosmosAsyncContainer().delete().block();
        createEncryptionContainer(cosmosEncryptionAsyncDatabase, clientEncryptionPolicy, containerId);
        String itemId = UUID.randomUUID().toString();
        EncryptionPojo createPojo = getItem(itemId);
        CosmosBatch cosmosEncryptionBatch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
        cosmosEncryptionBatch.createItemOperation(createPojo);
        cosmosEncryptionBatch.readItemOperation(itemId);
        CosmosBatchResponse batchResponse = encryptionAsyncContainerOriginal.executeCosmosBatch(cosmosEncryptionBatch).block();
        assertThat(batchResponse.getResults().size()).isEqualTo(2);
        validateResponse(createPojo, batchResponse.getResults().get(0).getItem(EncryptionPojo.class));
        validateResponse(createPojo, batchResponse.getResults().get(1).getItem(EncryptionPojo.class));
    } finally {
        try {
            this.client.getDatabase(databaseId).delete().block();
        } catch (Exception ex) {
            // Best-effort cleanup: the database may already have been deleted mid-test.
        }
    }
}
// Negative test: creating a client encryption key with an unrecognized algorithm name must
// be rejected client-side with IllegalArgumentException and the exact message below.
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void invalidDataEncryptionKeyAlgorithm() {
try {
TestEncryptionKeyStoreProvider testEncryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata metadata =
new EncryptionKeyWrapMetadata(testEncryptionKeyStoreProvider.getProviderName(), "key1",
"tempmetadata1");
this.cosmosEncryptionAsyncDatabase.createClientEncryptionKey("key1",
"InvalidAlgorithm", metadata).block();
fail("client encryption key create should fail on invalid algorithm");
} catch (IllegalArgumentException ex) {
assertThat(ex.getMessage()).isEqualTo("Invalid Encryption Algorithm 'InvalidAlgorithm'");
}
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecution() {
    // Runs create/replace/upsert/read/delete of the same item id as one transactional batch
    // through the encryption container, then checks per-operation status codes and payloads.
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");

    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createPojo);
    batch.replaceItemOperation(itemId, replacePojo);
    batch.upsertItemOperation(createPojo);
    batch.readItemOperation(itemId);
    batch.deleteItemOperation(itemId);

    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(response.getResults().size()).isEqualTo(5);

    // Expected status codes, in operation order (delete returns 204 with no body).
    int[] expectedStatus = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatus.length; i++) {
        assertThat(response.getResults().get(i).getStatusCode()).isEqualTo(expectedStatus[i]);
    }

    // Validate the decrypted payloads of the four operations that return a body.
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
@Test(groups = {"encryption"}, timeOut = TIMEOUT)
public void batchExecutionWithOptionsApi() {
    // Same transactional-batch round trip as batchExecution, but every operation is added
    // through the overloads that accept CosmosBatchItemRequestOptions.
    String itemId = UUID.randomUUID().toString();
    EncryptionPojo createPojo = getItem(itemId);
    EncryptionPojo replacePojo = getItem(itemId);
    replacePojo.setSensitiveString("ReplacedSensitiveString");

    CosmosBatchItemRequestOptions perItemOptions = new CosmosBatchItemRequestOptions();
    CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(itemId));
    batch.createItemOperation(createPojo, perItemOptions);
    batch.replaceItemOperation(itemId, replacePojo, perItemOptions);
    batch.upsertItemOperation(createPojo, perItemOptions);
    batch.readItemOperation(itemId, perItemOptions);
    batch.deleteItemOperation(itemId, perItemOptions);

    CosmosBatchResponse response = this.cosmosEncryptionAsyncContainer.executeCosmosBatch(batch).block();
    assertThat(response.getResults().size()).isEqualTo(5);

    // Expected status codes, in operation order (delete returns 204 with no body).
    int[] expectedStatus = {
        HttpResponseStatus.CREATED.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.OK.code(),
        HttpResponseStatus.NO_CONTENT.code()
    };
    for (int i = 0; i < expectedStatus.length; i++) {
        assertThat(response.getResults().get(i).getStatusCode()).isEqualTo(expectedStatus[i]);
    }

    // Validate the decrypted payloads of the four operations that return a body.
    validateResponse(response.getResults().get(0).getItem(EncryptionPojo.class), createPojo);
    validateResponse(response.getResults().get(1).getItem(EncryptionPojo.class), replacePojo);
    validateResponse(response.getResults().get(2).getItem(EncryptionPojo.class), createPojo);
    validateResponse(response.getResults().get(3).getItem(EncryptionPojo.class), createPojo);
}
// Asserts the "single encrypted field" shape: /sensitiveString must NOT round-trip to the
// original plaintext (it is expected to still be encrypted in `result`), while every other
// field must match the original exactly.
static void validateResponseWithOneFieldEncryption(EncryptionPojo originalItem, EncryptionPojo result) {
assertThat(result.getId()).isEqualTo(originalItem.getId());
assertThat(result.getNonSensitive()).isEqualTo(originalItem.getNonSensitive());
// The one encrypted property: must differ from the raw value.
assertThat(result.getSensitiveString()).isNotEqualTo(originalItem.getSensitiveString());
assertThat(result.getSensitiveInt()).isEqualTo(originalItem.getSensitiveInt());
assertThat(result.getSensitiveFloat()).isEqualTo(originalItem.getSensitiveFloat());
assertThat(result.getSensitiveLong()).isEqualTo(originalItem.getSensitiveLong());
assertThat(result.getSensitiveDouble()).isEqualTo(originalItem.getSensitiveDouble());
assertThat(result.isSensitiveBoolean()).isEqualTo(originalItem.isSensitiveBoolean());
assertThat(result.getSensitiveIntArray()).isEqualTo(originalItem.getSensitiveIntArray());
assertThat(result.getSensitiveStringArray()).isEqualTo(originalItem.getSensitiveStringArray());
assertThat(result.getSensitiveString3DArray()).isEqualTo(originalItem.getSensitiveString3DArray());
}
/**
 * Builds an encryption-policy path list that covers exactly one property:
 * {@code /sensitiveString}, encrypted deterministically with key "key1" using
 * AEAD_AES_256_CBC_HMAC_SHA256.
 *
 * @return a mutable list containing the single included path.
 */
public static List<ClientEncryptionIncludedPath> getPathWithOneEncryptionField() {
ClientEncryptionIncludedPath sensitiveStringPath = new ClientEncryptionIncludedPath();
sensitiveStringPath.setClientEncryptionKeyId("key1");
sensitiveStringPath.setPath("/sensitiveString");
sensitiveStringPath.setEncryptionType(CosmosEncryptionType.DETERMINISTIC);
sensitiveStringPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256);
List<ClientEncryptionIncludedPath> includedPaths = new ArrayList<>();
includedPaths.add(sensitiveStringPath);
return includedPaths;
}
/**
 * Creates a container (partition key {@code /mypk}) with the supplied client encryption
 * policy inside the given encryption database, blocking until the service call completes.
 */
private void createEncryptionContainer(CosmosEncryptionAsyncDatabase cosmosEncryptionAsyncDatabase,
ClientEncryptionPolicy clientEncryptionPolicy,
String containerId) {
CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy);
cosmosEncryptionAsyncDatabase.getCosmosAsyncDatabase().createContainer(containerProperties).block();
}
/**
 * Creates a new database and provisions two client encryption keys ("key1" and "key2") in it,
 * both wrapped via {@code TestEncryptionKeyStoreProvider} and using
 * AEAD_AES_256_CBC_HMAC_SHA256. Blocks on each service call.
 */
private void createNewDatabaseWithClientEncryptionKey(String databaseId){
TestEncryptionKeyStoreProvider keyStoreProvider = new TestEncryptionKeyStoreProvider();
EncryptionKeyWrapMetadata key1Metadata = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key1", "tempmetadata1");
EncryptionKeyWrapMetadata key2Metadata = new EncryptionKeyWrapMetadata(keyStoreProvider.getProviderName(), "key2", "tempmetadata2");
cosmosEncryptionAsyncClient.getCosmosAsyncClient().createDatabase(databaseId).block();
CosmosEncryptionAsyncDatabase encryptionDatabase = cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(databaseId);
encryptionDatabase.createClientEncryptionKey("key1",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, key1Metadata).block();
encryptionDatabase.createClientEncryptionKey("key2",
CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256, key2Metadata).block();
}
/**
 * Builds a brand-new client / encryption-client / database / container proxy chain for the
 * given database and container ids, deliberately independent of the instance-level clients.
 *
 * <p>Note: the local encryption client previously shadowed the instance field
 * {@code cosmosEncryptionAsyncClient}, which made it easy to misread which client was in use;
 * the local is now named distinctly and the container is returned directly.</p>
 *
 * @param databaseId id of the existing database.
 * @param containerId id of the existing container.
 * @return a {@code CosmosEncryptionAsyncContainer} proxy bound to a freshly built client.
 */
private CosmosEncryptionAsyncContainer getNewEncryptionContainerProxyObject(String databaseId, String containerId) {
CosmosAsyncClient client = getClientBuilder().buildAsyncClient();
EncryptionKeyStoreProvider encryptionKeyStoreProvider = new TestEncryptionKeyStoreProvider();
// Distinct name: must not shadow the instance field of the same type.
CosmosEncryptionAsyncClient encryptionClient = CosmosEncryptionAsyncClient.createCosmosEncryptionAsyncClient(client,
encryptionKeyStoreProvider);
CosmosEncryptionAsyncDatabase encryptionDatabase =
encryptionClient.getCosmosEncryptionAsyncDatabase(client.getDatabase(databaseId));
return encryptionDatabase.getCosmosEncryptionAsyncContainer(containerId);
}
} |
If the cachedBodySize is greater than Integer.MAX_VALUE then will this return a truncated byte[]? We should instead add the following check and throw: ```java if(cachedBodySize.get() > Integer.MAX_VALUE ) { return monoError(logger, new IllegalStateException("Response body size doesn't fit into a byte array")); } ``` | public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get());
} | return FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get()); | public Mono<byte[]> getBodyAsByteArray() {
return (cachedBodySize.get() > Integer.MAX_VALUE)
? monoError(logger, new IllegalStateException(
"Response with body size " + cachedBodySize.get() + " doesn't fit into a byte array."))
: FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get());
} | class BufferedHttpResponse extends HttpResponse {
private final HttpResponse innerHttpResponse;
private final Mono<List<ByteBuffer>> cachedBody;
private final AtomicLong cachedBodySize = new AtomicLong();
/**
* Creates a buffered HTTP response.
*
* @param innerHttpResponse The HTTP response to buffer
*/
public BufferedHttpResponse(HttpResponse innerHttpResponse) {
super(innerHttpResponse.getRequest());
this.innerHttpResponse = innerHttpResponse;
// Eagerly copy each streamed buffer into an independent byte array (so later reads don't
// depend on the inner response's network buffers) while accumulating the total body size
// in cachedBodySize. collectList().cache() makes the buffered body replayable, so
// getBody() can be subscribed to repeatedly without re-reading the inner response.
this.cachedBody = innerHttpResponse.getBody()
.map(buffer -> {
cachedBodySize.addAndGet(buffer.remaining());
return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
})
.collectList()
.cache();
}
@Override
public int getStatusCode() {
return innerHttpResponse.getStatusCode();
}
@Override
public String getHeaderValue(String name) {
return innerHttpResponse.getHeaderValue(name);
}
@Override
public HttpHeaders getHeaders() {
return innerHttpResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return cachedBody.flatMapMany(Flux::fromIterable).map(ByteBuffer::duplicate);
}
@Override
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes ->
CoreUtils.bomAwareToString(bytes, innerHttpResponse.getHeaderValue("Content-Type")));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BufferedHttpResponse buffer() {
return this;
}
} | class BufferedHttpResponse extends HttpResponse {
private final ClientLogger logger = new ClientLogger(BufferedHttpResponse.class);
private final HttpResponse innerHttpResponse;
private final Mono<List<ByteBuffer>> cachedBody;
private final AtomicLong cachedBodySize = new AtomicLong();
/**
* Creates a buffered HTTP response.
*
* @param innerHttpResponse The HTTP response to buffer
*/
public BufferedHttpResponse(HttpResponse innerHttpResponse) {
super(innerHttpResponse.getRequest());
this.innerHttpResponse = innerHttpResponse;
this.cachedBody = innerHttpResponse.getBody()
.map(buffer -> {
cachedBodySize.addAndGet(buffer.remaining());
return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
})
.collectList()
.cache();
}
@Override
public int getStatusCode() {
return innerHttpResponse.getStatusCode();
}
@Override
public String getHeaderValue(String name) {
return innerHttpResponse.getHeaderValue(name);
}
@Override
public HttpHeaders getHeaders() {
return innerHttpResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return cachedBody.flatMapMany(Flux::fromIterable).map(ByteBuffer::duplicate);
}
@Override
@Override
public Mono<String> getBodyAsString() {
return (cachedBodySize.get() > Integer.MAX_VALUE)
? monoError(logger, new IllegalStateException(
"Response with body size " + cachedBodySize.get() + " doesn't fit into a String."))
: getBodyAsByteArray().map(bytes ->
CoreUtils.bomAwareToString(bytes, innerHttpResponse.getHeaderValue("Content-Type")));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return (cachedBodySize.get() > Integer.MAX_VALUE)
? monoError(logger, new IllegalStateException(
"Response with body size " + cachedBodySize.get() + " doesn't fit into a String."))
: getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BufferedHttpResponse buffer() {
return this;
}
} |
That is a good addition; we should also see if it would be possible to add this to the other `HttpResponse` implementations to prevent needless array creations when we know it'll fail. | public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get());
} | return FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get()); | public Mono<byte[]> getBodyAsByteArray() {
return (cachedBodySize.get() > Integer.MAX_VALUE)
? monoError(logger, new IllegalStateException(
"Response with body size " + cachedBodySize.get() + " doesn't fit into a byte array."))
: FluxUtil.collectBytesInByteBufferStream(getBody(), (int) cachedBodySize.get());
} | class BufferedHttpResponse extends HttpResponse {
private final HttpResponse innerHttpResponse;
private final Mono<List<ByteBuffer>> cachedBody;
private final AtomicLong cachedBodySize = new AtomicLong();
/**
* Creates a buffered HTTP response.
*
* @param innerHttpResponse The HTTP response to buffer
*/
public BufferedHttpResponse(HttpResponse innerHttpResponse) {
super(innerHttpResponse.getRequest());
this.innerHttpResponse = innerHttpResponse;
this.cachedBody = innerHttpResponse.getBody()
.map(buffer -> {
cachedBodySize.addAndGet(buffer.remaining());
return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
})
.collectList()
.cache();
}
@Override
public int getStatusCode() {
return innerHttpResponse.getStatusCode();
}
@Override
public String getHeaderValue(String name) {
return innerHttpResponse.getHeaderValue(name);
}
@Override
public HttpHeaders getHeaders() {
return innerHttpResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return cachedBody.flatMapMany(Flux::fromIterable).map(ByteBuffer::duplicate);
}
@Override
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(bytes ->
CoreUtils.bomAwareToString(bytes, innerHttpResponse.getHeaderValue("Content-Type")));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BufferedHttpResponse buffer() {
return this;
}
} | class BufferedHttpResponse extends HttpResponse {
private final ClientLogger logger = new ClientLogger(BufferedHttpResponse.class);
private final HttpResponse innerHttpResponse;
private final Mono<List<ByteBuffer>> cachedBody;
private final AtomicLong cachedBodySize = new AtomicLong();
/**
* Creates a buffered HTTP response.
*
* @param innerHttpResponse The HTTP response to buffer
*/
public BufferedHttpResponse(HttpResponse innerHttpResponse) {
super(innerHttpResponse.getRequest());
this.innerHttpResponse = innerHttpResponse;
this.cachedBody = innerHttpResponse.getBody()
.map(buffer -> {
cachedBodySize.addAndGet(buffer.remaining());
return ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
})
.collectList()
.cache();
}
@Override
public int getStatusCode() {
return innerHttpResponse.getStatusCode();
}
@Override
public String getHeaderValue(String name) {
return innerHttpResponse.getHeaderValue(name);
}
@Override
public HttpHeaders getHeaders() {
return innerHttpResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return cachedBody.flatMapMany(Flux::fromIterable).map(ByteBuffer::duplicate);
}
@Override
@Override
public Mono<String> getBodyAsString() {
return (cachedBodySize.get() > Integer.MAX_VALUE)
? monoError(logger, new IllegalStateException(
"Response with body size " + cachedBodySize.get() + " doesn't fit into a String."))
: getBodyAsByteArray().map(bytes ->
CoreUtils.bomAwareToString(bytes, innerHttpResponse.getHeaderValue("Content-Type")));
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return (cachedBodySize.get() > Integer.MAX_VALUE)
? monoError(logger, new IllegalStateException(
"Response with body size " + cachedBodySize.get() + " doesn't fit into a String."))
: getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BufferedHttpResponse buffer() {
return this;
}
} |
The use of `this.` consistently may add additional inconsistencies to the code style. I tend to use `this.` only when it is ambiguous whether we are using the member variable or a local. | private void handleError(Throwable error, String message) {
if (this.hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
this.endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
} | this.endpointStates.emitError(error, (signalType, emitResult) -> { | private void handleError(Throwable error, String message) {
if (hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(this.sendLink, this.sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(this.receiveLink, this.receiveLinkHandler);
this.subscriptions = Disposables.composite(
this.receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
this.receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
this.sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return this.endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = this.closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (this.isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
this.provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return this.isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(this.requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(this.replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
this.sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
this.receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, this.retryOptions, this.activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
this.unconfirmedSends.putIfAbsent(messageId, sink);
this.provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = this.messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
this.receiveLink.flow(1);
this.sendLink.send(bytes, 0, encodedSize);
delivery.settle();
this.sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return this.receiveLinkHandler.getErrorContext(this.receiveLink);
}
/**
 * Reads the pending bytes of a proton-j delivery off the receive link and decodes them
 * into an AMQP {@code Message}. In SETTLED sender-settle mode the delivery is additionally
 * marked Accepted and settled locally.
 */
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
// Size the buffer to exactly the bytes pending for this delivery, then drain them.
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = this.receiveLink.recv(buffer, 0, msgSize);
// Decode only the bytes actually read, which may be fewer than msgSize.
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
/**
 * Completes the pending send whose request message-id matches this response's
 * correlation-id. Responses with no matching pending request are logged and dropped.
 */
private void settleMessage(Message message) {
// The response's correlation id carries the message id assigned to the original request.
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
// remove() (not get()) so each pending sink is completed at most once.
final MonoSink<Message> sink = this.unconfirmedSends.remove(correlationId);
if (sink == null) {
// No pending request matches this delivery (presumably already completed or
// terminated) - warn and drop the message.
int size = this.unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
/**
 * Invoked when one of the two link handlers (send or receive) reaches a terminal state.
 * The channel tears itself down only after BOTH handlers have terminated
 * (pendingLinkTerminations starts at 2).
 */
private void onTerminalState(String handlerName) {
if (this.pendingLinkTerminations.get() == 0) {
// NOTE(review): this verbose log declares '{}' placeholders but passes no arguments;
// it presumably intended to log connectionId/linkName - confirm and fix separately.
logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.");
return;
}
final int remaining = this.pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
// Last link terminated: release subscriptions, fail any still-unconfirmed sends,
// then complete the endpoint-state stream and the close mono.
this.subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
this.endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
this.closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
/**
 * Records the latest state of one link (exactly one of the two parameters is expected to be
 * non-null per call) and publishes a channel-level endpoint state only when the send and
 * receive links agree. Synchronized because it is called from both link handlers' streams.
 */
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
this.connectionId, this.linkName, this.sendLinkState, this.receiveLinkState);
// Only emit once both links report the same state, so downstream sees the channel as a unit.
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
/**
 * Fails every pending (unconfirmed) send with the given error and empties the pending map.
 */
private void terminateUnconfirmedSends(Throwable error) {
logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
this.connectionId, this.linkName, this.unconfirmedSends.size(), error.getMessage());
// Drain entry-by-entry; ConcurrentSkipListMap.pollFirstEntry() removes atomically, so each
// sink is errored exactly once even if settlement races with this termination.
Map.Entry<UnsignedLong, MonoSink<Message>> next;
int count = 0;
while ((next = this.unconfirmedSends.pollFirstEntry()) != null) {
next.getValue().error(error);
count++;
}
logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
this.connectionId, this.linkName, count, error.getMessage());
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(sendLink, sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(receiveLink, receiveLinkHandler);
this.subscriptions = Disposables.composite(
receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, retryOptions, activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
unconfirmedSends.putIfAbsent(messageId, sink);
provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
receiveLink.flow(1);
sendLink.send(bytes, 0, encodedSize);
delivery.settle();
sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return receiveLinkHandler.getErrorContext(receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = unconfirmedSends.remove(correlationId);
if (sink == null) {
int size = unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
private void onTerminalState(String handlerName) {
if (pendingLinkTerminations.get() <= 0) {
logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.");
return;
}
final int remaining = pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
connectionId, linkName, this.sendLinkState, this.receiveLinkState);
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
private void terminateUnconfirmedSends(Throwable error) {
logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
connectionId, linkName, unconfirmedSends.size(), error.getMessage());
Map.Entry<UnsignedLong, MonoSink<Message>> next;
int count = 0;
while ((next = unconfirmedSends.pollFirstEntry()) != null) {
next.getValue().error(error);
count++;
}
logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
connectionId, linkName, count, error.getMessage());
}
} |
I wonder if it is possible for someone else to be performing decrementAndGet before we get to the next line since only this `get()` operation is atomic. maybe `if (this.pendingLinkTerminations.get() <= 0)`? | private void onTerminalState(String handlerName) {
if (this.pendingLinkTerminations.get() == 0) {
logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.");
return;
}
final int remaining = this.pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
this.subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
this.endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
this.closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
} | if (this.pendingLinkTerminations.get() == 0) { | private void onTerminalState(String handlerName) {
if (pendingLinkTerminations.get() <= 0) {
logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.");
return;
}
final int remaining = pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(this.sendLink, this.sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(this.receiveLink, this.receiveLinkHandler);
this.subscriptions = Disposables.composite(
this.receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
this.receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
this.sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return this.endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = this.closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (this.isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
this.provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return this.isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(this.requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(this.replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
this.sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
this.receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, this.retryOptions, this.activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
this.unconfirmedSends.putIfAbsent(messageId, sink);
this.provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = this.messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
this.receiveLink.flow(1);
this.sendLink.send(bytes, 0, encodedSize);
delivery.settle();
this.sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return this.receiveLinkHandler.getErrorContext(this.receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = this.receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = this.unconfirmedSends.remove(correlationId);
if (sink == null) {
int size = this.unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
private void handleError(Throwable error, String message) {
if (this.hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
this.endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
this.connectionId, this.linkName, this.sendLinkState, this.receiveLinkState);
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
private void terminateUnconfirmedSends(Throwable error) {
logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
this.connectionId, this.linkName, this.unconfirmedSends.size(), error.getMessage());
Map.Entry<UnsignedLong, MonoSink<Message>> next;
int count = 0;
while ((next = this.unconfirmedSends.pollFirstEntry()) != null) {
next.getValue().error(error);
count++;
}
logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
this.connectionId, this.linkName, count, error.getMessage());
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(sendLink, sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(receiveLink, receiveLinkHandler);
this.subscriptions = Disposables.composite(
receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, retryOptions, activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
unconfirmedSends.putIfAbsent(messageId, sink);
provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
receiveLink.flow(1);
sendLink.send(bytes, 0, encodedSize);
delivery.settle();
sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return receiveLinkHandler.getErrorContext(receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = unconfirmedSends.remove(correlationId);
if (sink == null) {
int size = unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
/**
 * Moves the channel into its terminal error state. Only the first reported error is acted
 * upon; later calls return immediately.
 *
 * @param error Error that terminated the channel.
 * @param message Context describing where the error originated (e.g. which link handler).
 */
private void handleError(Throwable error, String message) {
// getAndSet(true) makes this idempotent: only the first caller proceeds past this guard.
if (hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
// Propagate the error to endpoint-state subscribers; on emission failure just log and
// return false so the emission is not retried.
endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
// Fail every send still waiting for an acknowledgment, then begin closing the channel.
terminateUnconfirmedSends(error);
closeAsync().subscribe();
}
/**
 * Shared emission-failure handler for the channel's sinks: logs the failed signal at
 * verbose level and returns {@code false} so Reactor does not retry the emission.
 */
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
/**
 * Records the latest state of the send or receive link. Callers pass the updated state in
 * one parameter and {@code null} in the other; note the else-if means that if both were
 * non-null only the send-link state would be recorded. Synchronized so updates from the
 * two link handlers cannot interleave.
 */
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
connectionId, linkName, this.sendLinkState, this.receiveLinkState);
// The channel's combined state is emitted only when both links agree; FAIL_FAST throws
// on emission failure instead of retrying.
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
/**
 * Fails every send that is still waiting for an acknowledgment with the given error,
 * draining the pending-send map in the process.
 *
 * @param error Cause delivered to each pending sink.
 */
private void terminateUnconfirmedSends(Throwable error) {
    logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
        connectionId, linkName, unconfirmedSends.size(), error.getMessage());

    // Drain the map one entry at a time; pollFirstEntry is atomic on ConcurrentSkipListMap.
    int failed = 0;
    for (Map.Entry<UnsignedLong, MonoSink<Message>> entry = unconfirmedSends.pollFirstEntry();
            entry != null;
            entry = unconfirmedSends.pollFirstEntry()) {
        entry.getValue().error(error);
        failed++;
    }

    logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
        connectionId, linkName, failed, error.getMessage());
}
} |
I see, let me revert the usage of `this` then. | private void handleError(Throwable error, String message) {
if (this.hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
this.endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
} | this.endpointStates.emitError(error, (signalType, emitResult) -> { | private void handleError(Throwable error, String message) {
if (hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(this.sendLink, this.sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(this.receiveLink, this.receiveLinkHandler);
this.subscriptions = Disposables.composite(
this.receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
this.receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
this.sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
// Expose the multicast sink as a read-only Flux.
return this.endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = this.closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (this.isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
this.provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
// Returns true once closeAsync() has begun disposing the channel.
public boolean isDisposed() {
return this.isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
// No delivery state to convey; delegate to the two-argument overload.
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(this.requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(this.replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
this.sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
this.receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, this.retryOptions, this.activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
this.unconfirmedSends.putIfAbsent(messageId, sink);
this.provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = this.messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
this.receiveLink.flow(1);
this.sendLink.send(bytes, 0, encodedSize);
delivery.settle();
this.sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return this.receiveLinkHandler.getErrorContext(this.receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = this.receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = this.unconfirmedSends.remove(correlationId);
if (sink == null) {
int size = this.unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
/**
 * Invoked when one of the two link handlers (send or receive) reaches a terminal state.
 * Once both have terminated, disposes subscriptions, fails pending sends, and completes
 * the endpoint-state and close signals.
 *
 * @param handlerName Name of the handler that terminated (for logging).
 */
private void onTerminalState(String handlerName) {
    // Use <= 0 rather than == 0: both handlers can signal termination concurrently, so the
    // counter may already have been driven to (or below) zero between the check and the
    // decrement. Treating any non-positive value as "already disposed" keeps this idempotent.
    if (this.pendingLinkTerminations.get() <= 0) {
        // Supply the arguments for the two format placeholders (they were missing before).
        logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.",
            connectionId, linkName);
        return;
    }

    final int remaining = this.pendingLinkTerminations.decrementAndGet();
    logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
        connectionId, linkName, handlerName, remaining);

    // Only after BOTH handlers have terminated is the channel fully done.
    if (remaining == 0) {
        this.subscriptions.dispose();
        // Fixed grammar in the error message ("due to receive link termination").
        terminateUnconfirmedSends(new AmqpException(true,
            "The RequestResponseChannel didn't receive the acknowledgment for the send due to receive link termination.",
            null));
        this.endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
            "Could not emit complete signal.")));
        this.closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
            handlerName + ". Error closing mono."));
    }
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
this.connectionId, this.linkName, this.sendLinkState, this.receiveLinkState);
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
private void terminateUnconfirmedSends(Throwable error) {
logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
this.connectionId, this.linkName, this.unconfirmedSends.size(), error.getMessage());
Map.Entry<UnsignedLong, MonoSink<Message>> next;
int count = 0;
while ((next = this.unconfirmedSends.pollFirstEntry()) != null) {
next.getValue().error(error);
count++;
}
logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
this.connectionId, this.linkName, count, error.getMessage());
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace for the the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(sendLink, sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(receiveLink, receiveLinkHandler);
this.subscriptions = Disposables.composite(
receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
// Expose the multicast sink as a read-only Flux.
return endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
// Returns true once closeAsync() has begun disposing the channel.
public boolean isDisposed() {
return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
// No delivery state to convey; delegate to the two-argument overload.
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, retryOptions, activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
unconfirmedSends.putIfAbsent(messageId, sink);
provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
receiveLink.flow(1);
sendLink.send(bytes, 0, encodedSize);
delivery.settle();
sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
// Delegate to the receive-link handler, which tracks the remote endpoint's error condition.
return receiveLinkHandler.getErrorContext(receiveLink);
}
/**
 * Decodes the pending bytes of a proton-j {@link Delivery} into an AMQP {@link Message}.
 *
 * @param delivery Delivery whose pending bytes are read from the receive link.
 * @return The decoded AMQP message.
 */
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
// Read exactly the number of bytes the delivery reports as pending.
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
// When the link was opened with SenderSettleMode.SETTLED, the delivery is accepted and
// settled locally right here; otherwise no disposition is sent from this method.
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
/**
 * Completes the pending send whose messageId matches the correlationId carried on the
 * given response message. Logs a warning when no matching pending send exists.
 *
 * @param message Response message received from the service.
 */
private void settleMessage(Message message) {
// The request's messageId is echoed back as the response's correlationId.
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = unconfirmedSends.remove(correlationId);
if (sink == null) {
int size = unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
/**
 * Invoked when one of the two link handlers (send or receive) reaches a terminal state.
 * Once both have terminated, disposes subscriptions, fails pending sends, and completes
 * the endpoint-state and close signals.
 *
 * @param handlerName Name of the handler that terminated (for logging).
 */
private void onTerminalState(String handlerName) {
    // <= 0 keeps this idempotent when both handlers signal termination concurrently.
    if (pendingLinkTerminations.get() <= 0) {
        // Supply the arguments for the two format placeholders (they were missing before).
        logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.",
            connectionId, linkName);
        return;
    }

    final int remaining = pendingLinkTerminations.decrementAndGet();
    logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
        connectionId, linkName, handlerName, remaining);

    // Only after BOTH handlers have terminated is the channel fully done.
    if (remaining == 0) {
        subscriptions.dispose();
        // Fixed grammar in the error message ("due to receive link termination").
        terminateUnconfirmedSends(new AmqpException(true,
            "The RequestResponseChannel didn't receive the acknowledgment for the send due to receive link termination.",
            null));
        endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
            "Could not emit complete signal.")));
        closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
            handlerName + ". Error closing mono."));
    }
}
/**
 * Shared emission-failure handler for the channel's sinks: logs the failed signal at
 * verbose level and returns {@code false} so Reactor does not retry the emission.
 */
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
/**
 * Records the latest state of the send or receive link. Callers pass the updated state in
 * one parameter and {@code null} in the other; note the else-if means that if both were
 * non-null only the send-link state would be recorded. Synchronized so updates from the
 * two link handlers cannot interleave.
 */
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
connectionId, linkName, this.sendLinkState, this.receiveLinkState);
// The channel's combined state is emitted only when both links agree; FAIL_FAST throws
// on emission failure instead of retrying.
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
/**
 * Fails every send that is still waiting for an acknowledgment with the given error,
 * draining the pending-send map in the process.
 *
 * @param error Cause delivered to each pending sink.
 */
private void terminateUnconfirmedSends(Throwable error) {
    logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
        connectionId, linkName, unconfirmedSends.size(), error.getMessage());

    // Drain the map one entry at a time; pollFirstEntry is atomic on ConcurrentSkipListMap.
    int failed = 0;
    for (Map.Entry<UnsignedLong, MonoSink<Message>> entry = unconfirmedSends.pollFirstEntry();
            entry != null;
            entry = unconfirmedSends.pollFirstEntry()) {
        entry.getValue().error(error);
        failed++;
    }

    logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
        connectionId, linkName, failed, error.getMessage());
}
} |
Good point, will switch to `<= 0` | private void onTerminalState(String handlerName) {
if (this.pendingLinkTerminations.get() == 0) {
logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.");
return;
}
final int remaining = this.pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
this.subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
this.endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
this.closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
} | if (this.pendingLinkTerminations.get() == 0) { | private void onTerminalState(String handlerName) {
if (pendingLinkTerminations.get() <= 0) {
logger.verbose("connectionId[{}] linkName[{}]: Already disposed send/receive links.");
return;
}
final int remaining = pendingLinkTerminations.decrementAndGet();
logger.verbose("connectionId[{}] linkName[{}]: {} disposed. Remaining: {}",
connectionId, linkName, handlerName, remaining);
if (remaining == 0) {
subscriptions.dispose();
terminateUnconfirmedSends(new AmqpException(true,
"The RequestResponseChannel didn't receive the acknowledgment for the send due receive link termination.",
null));
endpointStates.emitComplete(((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
"Could not emit complete signal.")));
closeMono.emitEmpty((signalType, emitResult) -> onEmitSinkFailure(signalType, emitResult,
handlerName + ". Error closing mono."));
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace of the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(this.sendLink, this.sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(this.receiveLink, this.receiveLinkHandler);
this.subscriptions = Disposables.composite(
this.receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
this.receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
this.sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return this.endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = this.closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (this.isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
this.provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
this.sendLink.close();
this.receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
/**
 * Gets whether this channel has started or finished closing.
 *
 * @return {@code true} if the channel has been disposed; {@code false} otherwise.
 */
public boolean isDisposed() {
    return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(this.requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(this.replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
this.sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
this.receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, this.retryOptions, this.activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
this.unconfirmedSends.putIfAbsent(messageId, sink);
this.provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = this.messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
this.receiveLink.flow(1);
this.sendLink.send(bytes, 0, encodedSize);
delivery.settle();
this.sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return this.receiveLinkHandler.getErrorContext(this.receiveLink);
}
/**
 * Reads the pending bytes of the given delivery from the receive link and decodes them into an AMQP message.
 *
 * @param delivery proton-j delivery carrying the response payload.
 * @return The decoded AMQP response message.
 */
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
// Number of bytes available to read for this delivery.
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = this.receiveLink.recv(buffer, 0, msgSize);
// Decode only the bytes actually read, which may be fewer than msgSize.
response.decode(buffer, 0, read);
// When the link was created with SenderSettleMode.SETTLED, mark the delivery accepted and settle it locally.
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
/**
 * Completes the pending send whose message id matches the response's correlation id.
 * Logs a warning if no matching pending send exists.
 */
private void settleMessage(Message message) {
    final String correlation = String.valueOf(message.getCorrelationId());
    final MonoSink<Message> pendingSink = unconfirmedSends.remove(UnsignedLong.valueOf(correlation));

    if (pendingSink != null) {
        pendingSink.success(message);
    } else {
        logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
            connectionId, linkName, correlation, unconfirmedSends.size());
    }
}
/**
 * Propagates a terminal error: emits it on the endpoint-state sink, fails every unconfirmed send, and starts
 * closing the channel. Only the first error is processed; later calls are no-ops.
 *
 * @param error Error raised by one of the link handlers.
 * @param message Context describing where the error came from.
 */
private void handleError(Throwable error, String message) {
// getAndSet guarantees only the first caller performs the error handling.
if (this.hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
// The failure handler returns false: do not retry the emission, just log it.
this.endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
}
/**
 * Logs a failed sink emission. Always answers {@code false} so the emission is never retried.
 */
private boolean onEmitSinkFailure(SignalType signal, Sinks.EmitResult result, String description) {
    logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
        connectionId, linkName, signal, result, description);

    return false;
}
/**
 * Records the latest state of one of the two links and, once both links are in the same state, publishes that
 * state as the combined channel state. Callers pass exactly one non-null argument per invocation.
 *
 * @param sendLinkState New state of the send link, or null when this update is for the receive link.
 * @param receiveLinkState New state of the receive link, or null when this update is for the send link.
 */
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
this.connectionId, this.linkName, this.sendLinkState, this.receiveLinkState);
// Only emit when both links agree, so subscribers observe a single channel-wide state.
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
/**
 * Drains the unconfirmed-sends map, failing every pending send with the given error.
 */
private void terminateUnconfirmedSends(Throwable error) {
    logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
        this.connectionId, this.linkName, this.unconfirmedSends.size(), error.getMessage());

    int terminated = 0;
    // pollFirstEntry atomically removes entries, so concurrent settlements cannot be failed twice.
    for (Map.Entry<UnsignedLong, MonoSink<Message>> entry = this.unconfirmedSends.pollFirstEntry();
        entry != null;
        entry = this.unconfirmedSends.pollFirstEntry()) {
        entry.getValue().error(error);
        terminated++;
    }

    logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
        this.connectionId, this.linkName, terminated, error.getMessage());
}
} | class RequestResponseChannel implements AsyncCloseable {
private final ClientLogger logger = new ClientLogger(RequestResponseChannel.class);
private final Sender sendLink;
private final Receiver receiveLink;
private final SendLinkHandler sendLinkHandler;
private final ReceiveLinkHandler receiveLinkHandler;
private final SenderSettleMode senderSettleMode;
private final Sinks.Many<AmqpEndpointState> endpointStates = Sinks.many().multicast().onBackpressureBuffer();
private volatile AmqpEndpointState sendLinkState;
private volatile AmqpEndpointState receiveLinkState;
private final AtomicLong requestId = new AtomicLong(0);
private final ConcurrentSkipListMap<UnsignedLong, MonoSink<Message>> unconfirmedSends =
new ConcurrentSkipListMap<>();
private final AtomicInteger pendingLinkTerminations = new AtomicInteger(2);
private final Sinks.One<Void> closeMono = Sinks.one();
private final AtomicBoolean hasError = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final Disposable.Composite subscriptions;
private final String connectionId;
private final String linkName;
private final AmqpRetryOptions retryOptions;
private final String replyTo;
private final String activeEndpointTimeoutMessage;
private final MessageSerializer messageSerializer;
private final ReactorProvider provider;
/**
* Creates a new instance of {@link RequestResponseChannel} to send and receive responses from the {@code
* entityPath} in the message broker.
*
* @param connectionId Identifier of the connection.
* @param fullyQualifiedNamespace Fully qualified namespace of the host.
* @param linkName Name of the link.
* @param entityPath Address in the message broker to send message to.
* @param session Reactor session associated with this link.
* @param retryOptions Retry options to use for sending the request response.
* @param handlerProvider Provides handlers that interact with proton-j's reactor.
* @param provider The reactor provider that the request will be sent with.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
protected RequestResponseChannel(AmqpConnection amqpConnection, String connectionId,
String fullyQualifiedNamespace, String linkName, String entityPath, Session session,
AmqpRetryOptions retryOptions, ReactorHandlerProvider handlerProvider, ReactorProvider provider,
MessageSerializer messageSerializer, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
this.connectionId = connectionId;
this.linkName = linkName;
this.retryOptions = retryOptions;
this.provider = provider;
this.senderSettleMode = senderSettleMode;
this.activeEndpointTimeoutMessage = String.format(
"RequestResponseChannel connectionId[%s], linkName[%s]: Waiting for send and receive handler to be ACTIVE",
connectionId, linkName);
this.replyTo = entityPath.replace("$", "") + "-client-reply-to";
this.messageSerializer = messageSerializer;
this.sendLink = session.sender(linkName + ":sender");
final Target senderTarget = new Target();
senderTarget.setAddress(entityPath);
this.sendLink.setTarget(senderTarget);
this.sendLink.setSource(new Source());
this.sendLink.setSenderSettleMode(senderSettleMode);
this.sendLinkHandler = handlerProvider.createSendLinkHandler(connectionId, fullyQualifiedNamespace, linkName,
entityPath);
BaseHandler.setHandler(sendLink, sendLinkHandler);
this.receiveLink = session.receiver(linkName + ":receiver");
final Source receiverSource = new Source();
receiverSource.setAddress(entityPath);
this.receiveLink.setSource(receiverSource);
final Target receiverTarget = new Target();
receiverTarget.setAddress(replyTo);
this.receiveLink.setTarget(receiverTarget);
this.receiveLink.setSenderSettleMode(senderSettleMode);
this.receiveLink.setReceiverSettleMode(receiverSettleMode);
this.receiveLinkHandler = handlerProvider.createReceiveLinkHandler(connectionId, fullyQualifiedNamespace,
linkName, entityPath);
BaseHandler.setHandler(receiveLink, receiveLinkHandler);
this.subscriptions = Disposables.composite(
receiveLinkHandler.getDeliveredMessages()
.map(this::decodeDelivery)
.subscribe(message -> {
logger.verbose("connectionId[{}], linkName[{}]: Settling message: {}", connectionId, linkName,
message.getCorrelationId());
settleMessage(message);
}),
receiveLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(null, AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
handleError(error, "Error in ReceiveLinkHandler.");
onTerminalState("ReceiveLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("ReceiveLinkHandler");
}),
sendLinkHandler.getEndpointStates().subscribe(state -> {
updateEndpointState(AmqpEndpointStateUtil.getConnectionState(state), null);
}, error -> {
handleError(error, "Error in SendLinkHandler.");
onTerminalState("SendLinkHandler");
}, () -> {
closeAsync().subscribe();
onTerminalState("SendLinkHandler");
}),
amqpConnection.getShutdownSignals().next().flatMap(signal -> {
logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", connectionId, linkName);
return closeAsync();
}).subscribe()
);
try {
this.provider.getReactorDispatcher().invoke(() -> {
this.sendLink.open();
this.receiveLink.open();
});
} catch (IOException e) {
throw logger.logExceptionAsError(new RuntimeException(String.format(
"connectionId[%s], linkName[%s]: Unable to open send and receive link.", connectionId, linkName), e));
}
}
/**
* Gets the endpoint states for the request-response-channel.
*
* @return The endpoint states for the request-response-channel.
*/
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates.asFlux();
}
@Override
public Mono<Void> closeAsync() {
final Mono<Void> closeOperationWithTimeout = closeMono.asMono()
.timeout(retryOptions.getTryTimeout())
.onErrorResume(TimeoutException.class, error -> {
return Mono.fromRunnable(() -> {
logger.info("connectionId[{}] linkName[{}] Timed out waiting for RequestResponseChannel to complete"
+ " closing. Manually closing.",
connectionId, linkName, error);
onTerminalState("SendLinkHandler");
onTerminalState("ReceiveLinkHandler");
});
})
.subscribeOn(Schedulers.boundedElastic());
if (isDisposed.getAndSet(true)) {
logger.verbose("connectionId[{}] linkName[{}] Channel already closed.", connectionId, linkName);
return closeOperationWithTimeout;
}
logger.verbose("connectionId[{}] linkName[{}] Closing request/response channel.", connectionId, linkName);
return Mono.fromRunnable(() -> {
try {
provider.getReactorDispatcher().invoke(() -> {
logger.verbose("connectionId[{}] linkName[{}] Closing send link and receive link.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
});
} catch (IOException | RejectedExecutionException e) {
logger.info("connectionId[{}] linkName[{}] Unable to schedule close work. Closing manually.",
connectionId, linkName);
sendLink.close();
receiveLink.close();
}
}).subscribeOn(Schedulers.boundedElastic()).then(closeOperationWithTimeout);
}
public boolean isDisposed() {
return isDisposed.get();
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message) {
return sendWithAck(message, null);
}
/**
* Sends a message to the message broker using the {@code dispatcher} and gets the response.
*
* @param message AMQP message to send.
* @param deliveryState Delivery state to be sent to service bus with message.
*
* @return An AMQP message representing the service's response to the message.
*/
public Mono<Message> sendWithAck(final Message message, DeliveryState deliveryState) {
if (isDisposed()) {
return monoError(logger, new IllegalStateException(
"Cannot send a message when request response channel is disposed."));
}
if (message == null) {
return monoError(logger, new NullPointerException("message cannot be null"));
}
if (message.getMessageId() != null) {
return monoError(logger, new IllegalArgumentException("message.getMessageId() should be null"));
}
if (message.getReplyTo() != null) {
return monoError(logger, new IllegalArgumentException("message.getReplyTo() should be null"));
}
final UnsignedLong messageId = UnsignedLong.valueOf(requestId.incrementAndGet());
message.setMessageId(messageId);
message.setReplyTo(replyTo);
final Mono<Void> onActiveEndpoints = Mono.when(
sendLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE),
receiveLinkHandler.getEndpointStates().takeUntil(x -> x == EndpointState.ACTIVE));
return RetryUtil.withRetry(onActiveEndpoints, retryOptions, activeEndpointTimeoutMessage)
.then(Mono.create(sink -> {
try {
logger.verbose("connectionId[{}], linkName[{}]: Scheduling on dispatcher. MessageId[{}]",
connectionId, linkName, messageId);
unconfirmedSends.putIfAbsent(messageId, sink);
provider.getReactorDispatcher().invoke(() -> {
final Delivery delivery = sendLink.delivery(UUID.randomUUID().toString()
.replace("-", "").getBytes(UTF_8));
if (deliveryState != null) {
logger.verbose("connectionId[{}], linkName[{}]: Setting delivery state as [{}].",
connectionId, linkName, deliveryState);
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
delivery.disposition(deliveryState);
}
final int payloadSize = messageSerializer.getSize(message)
+ ClientConstants.MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[payloadSize];
final int encodedSize = message.encode(bytes, 0, payloadSize);
receiveLink.flow(1);
sendLink.send(bytes, 0, encodedSize);
delivery.settle();
sendLink.advance();
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* Gets the error context for the channel.
*
* @return The error context for the channel.
*/
public AmqpErrorContext getErrorContext() {
return receiveLinkHandler.getErrorContext(receiveLink);
}
protected Message decodeDelivery(Delivery delivery) {
final Message response = Proton.message();
final int msgSize = delivery.pending();
final byte[] buffer = new byte[msgSize];
final int read = receiveLink.recv(buffer, 0, msgSize);
response.decode(buffer, 0, read);
if (this.senderSettleMode == SenderSettleMode.SETTLED) {
delivery.disposition(Accepted.getInstance());
delivery.settle();
}
return response;
}
private void settleMessage(Message message) {
final String id = String.valueOf(message.getCorrelationId());
final UnsignedLong correlationId = UnsignedLong.valueOf(id);
final MonoSink<Message> sink = unconfirmedSends.remove(correlationId);
if (sink == null) {
int size = unconfirmedSends.size();
logger.warning("connectionId[{}] linkName[{}] Received delivery without pending messageId[{}]. size[{}]",
connectionId, linkName, id, size);
return;
}
sink.success(message);
}
private void handleError(Throwable error, String message) {
if (hasError.getAndSet(true)) {
return;
}
logger.error("connectionId[{}] linkName[{}] {} Disposing unconfirmed sends.", connectionId, linkName, message,
error);
endpointStates.emitError(error, (signalType, emitResult) -> {
logger.warning("connectionId[{}] linkName[{}] signal[{}] result[{}] Could not emit error to sink.",
connectionId, linkName, signalType, emitResult);
return false;
});
terminateUnconfirmedSends(error);
closeAsync().subscribe();
}
private boolean onEmitSinkFailure(SignalType signalType, Sinks.EmitResult emitResult, String message) {
logger.verbose("connectionId[{}] linkName[{}] signal[{}] result[{}] {}",
connectionId, linkName, signalType, emitResult, message);
return false;
}
private synchronized void updateEndpointState(AmqpEndpointState sendLinkState, AmqpEndpointState receiveLinkState) {
if (sendLinkState != null) {
this.sendLinkState = sendLinkState;
} else if (receiveLinkState != null) {
this.receiveLinkState = receiveLinkState;
}
logger.verbose("connectionId[{}] linkName[{}] sendState[{}] receiveState[{}] Updating endpoint states.",
connectionId, linkName, this.sendLinkState, this.receiveLinkState);
if (this.sendLinkState == this.receiveLinkState) {
this.endpointStates.emitNext(this.sendLinkState, Sinks.EmitFailureHandler.FAIL_FAST);
}
}
private void terminateUnconfirmedSends(Throwable error) {
logger.verbose("connectionId[{}] linkName[{}] terminating {} unconfirmed sends (reason: {}).",
connectionId, linkName, unconfirmedSends.size(), error.getMessage());
Map.Entry<UnsignedLong, MonoSink<Message>> next;
int count = 0;
while ((next = unconfirmedSends.pollFirstEntry()) != null) {
next.getValue().error(error);
count++;
}
logger.verbose("connectionId[{}] linkName[{}] completed the termination of {} unconfirmed sends (reason: {}).",
connectionId, linkName, count, error.getMessage());
}
} |
`ALL.equals(String eTag)` isn't correct as we'll end up checking the Object pointer as ETag doesn't have a `equals(String)` method. You'll want to compare this to the `ALL.eTag` value | private void checkValidETag(String eTag) {
// Fix 1: ALL is an ETag, so ALL.equals(eTag) with a String argument can never be true;
// the wildcard must be compared as a String.
if (eTag == null || "*".equals(eTag)) {
    return;
}
// Fix 2: throw on values that are NOT properly quoted — the original condition was inverted
// and rejected exactly the valid strong ("...") and weak (W/"...") forms.
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
    && eTag.endsWith(QUOTE_STRING))) {
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
        "The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
        eTag)));
}
} | if (eTag == null || ALL.equals(eTag)) | private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String EMPTY_STRING = "";
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
public static final String DEFAULT_FORMAT = "G";
public static final String HEADER_FORMAT = "H";
public static final ETag ALL = new ETag("*");
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The eTag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
/**
 * Returns the ETag value in the specified format. For {@code DEFAULT_FORMAT} ("G") the raw stored value is
 * returned as-is; for {@code HEADER_FORMAT} ("H") it is wrapped in quotes, e.g. "12345".
 *
 * @param format A valid format value is {@code DEFAULT_FORMAT} or {@code HEADER_FORMAT}.
 *
 * @return The ETag value in the specified format, or an empty string when {@code format} is null.
 * @throws IllegalArgumentException If {@code format} is neither {@code DEFAULT_FORMAT} nor
 * {@code HEADER_FORMAT}.
 */
public String toString(String format) {
if (format == null) {
return EMPTY_STRING;
}
if (DEFAULT_FORMAT.equals(format)) {
return eTag;
} else if (HEADER_FORMAT.equals(format)) {
return String.format("\"%s\"", eTag);
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
String.format("Invalid format string, \"%s\".", format)));
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
/**
 * Two {@link ETag} instances are equal when their underlying string values are equal (both null counts
 * as equal).
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    // instanceof is false for null, which covers the explicit null check as well.
    if (!(o instanceof ETag)) {
        return false;
    }
    final ETag other = (ETag) o;
    return (eTag == null) ? other.eTag == null : eTag.equals(other.eTag);
}
@Override
public int hashCode() {
return Objects.hashCode(eTag);
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
} |
Surely we should properly implement equals / hashCode instead? | private void checkValidETag(String eTag) {
// Fix 1: ALL is an ETag, so ALL.equals(eTag) with a String argument can never be true;
// the wildcard must be compared as a String.
if (eTag == null || "*".equals(eTag)) {
    return;
}
// Fix 2: throw on values that are NOT properly quoted — the original condition was inverted
// and rejected exactly the valid strong ("...") and weak (W/"...") forms.
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
    && eTag.endsWith(QUOTE_STRING))) {
    throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
        "The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
        eTag)));
}
} | if (eTag == null || ALL.equals(eTag)) | private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String EMPTY_STRING = "";
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
public static final String DEFAULT_FORMAT = "G";
public static final String HEADER_FORMAT = "H";
public static final ETag ALL = new ETag("*");
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The eTag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
/**
* Returns the ETag value in the specified format. For {@code DEFAULT_FORMAT} ("G") the raw stored
* value is returned as-is; for {@code HEADER_FORMAT} ("H") it is wrapped in quotes, e.g. "12345".
*
* @param format A valid format value is {@code DEFAULT_FORMAT} or {@code HEADER_FORMAT}.
*
* @return The ETag value in the specified format.
*/
public String toString(String format) {
if (format == null) {
return EMPTY_STRING;
}
if (DEFAULT_FORMAT.equals(format)) {
return eTag;
} else if (HEADER_FORMAT.equals(format)) {
return String.format("\"%s\"", eTag);
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
String.format("Invalid format string, \"%s\".", format)));
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (eTag == null) {
return ((ETag) o).eTag == null;
} else {
return eTag.equals(((ETag) o).eTag);
}
}
@Override
public int hashCode() {
return Objects.hashCode(eTag);
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
} |
@lmolkova updated. Can you take another look? Thank you! | public boolean equals(Object o) {
if (o == this) {
return true;
}
if (!(o instanceof ETag)) {
return false;
}
ETag oETag = (ETag) o;
return this.eTag.equals(oETag.eTag);
} | return this.eTag.equals(oETag.eTag); | public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (eTag == null) {
return ((ETag) o).eTag == null;
} else {
return eTag.equals(((ETag) o).eTag);
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
@Override
public int hashCode() {
return this.eTag.hashCode();
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
@Override
public int hashCode() {
return Objects.hashCode(eTag);
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} |
it seems null etag is considered valid, so this would throw along with hash code. Should we allow null etags? | public boolean equals(Object o) {
if (o == this) {
return true;
}
if (!(o instanceof ETag)) {
return false;
}
ETag oETag = (ETag) o;
return this.eTag.equals(oETag.eTag);
} | return this.eTag.equals(oETag.eTag); | public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (eTag == null) {
return ((ETag) o).eTag == null;
} else {
return eTag.equals(((ETag) o).eTag);
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
@Override
public int hashCode() {
return this.eTag.hashCode();
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
@Override
public int hashCode() {
return Objects.hashCode(eTag);
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} |
Yes. Good point. I will update it to allow null ETag | public boolean equals(Object o) {
if (o == this) {
return true;
}
if (!(o instanceof ETag)) {
return false;
}
ETag oETag = (ETag) o;
return this.eTag.equals(oETag.eTag);
} | return this.eTag.equals(oETag.eTag); | public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (eTag == null) {
return ((ETag) o).eTag == null;
} else {
return eTag.equals(((ETag) o).eTag);
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
@Override
public int hashCode() {
return this.eTag.hashCode();
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
@Override
public int hashCode() {
return Objects.hashCode(eTag);
}
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} |
it would throw on null etag | public int hashCode() {
return this.eTag.hashCode();
} | return this.eTag.hashCode(); | public int hashCode() {
return Objects.hashCode(eTag);
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (this.eTag == null) {
return ((ETag) o).eTag == null;
} else {
return this.eTag.equals(((ETag) o).eTag);
}
}
@Override
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (eTag == null) {
return ((ETag) o).eTag == null;
} else {
return eTag.equals(((ETag) o).eTag);
}
}
@Override
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} |
Ah. Thanks. Updated. | public int hashCode() {
return this.eTag.hashCode();
} | return this.eTag.hashCode(); | public int hashCode() {
return Objects.hashCode(eTag);
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (this.eTag == null) {
return ((ETag) o).eTag == null;
} else {
return this.eTag.equals(((ETag) o).eTag);
}
}
@Override
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} | class ETag {
private static final ClientLogger LOGGER = new ClientLogger(ETag.class);
private static final String QUOTE_STRING = "\"";
private static final String WEAK_ETAG_PREFIX_QUOTE = "W/\"";
private static final String ASTERISK = "*";
/**
* The asterisk is a special value representing any resource.
*/
public static final ETag ALL = new ETag(ASTERISK);
private final String eTag;
/**
* Creates a new instance of {@link ETag}.
*
* @param eTag The HTTP entity tag string value.
*/
public ETag(String eTag) {
checkValidETag(eTag);
this.eTag = eTag;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null) {
return false;
} else if (!(o instanceof ETag)) {
return false;
} else if (eTag == null) {
return ((ETag) o).eTag == null;
} else {
return eTag.equals(((ETag) o).eTag);
}
}
@Override
@Override
public String toString() {
return eTag;
}
/**
* Checks if the {@code eTag} a valid ETag value. Valid ETags show below,
* - The special character, '*'.
* - A strong ETag, which the value is wrapped in quotes, ex, "12345".
* - A weak ETag, which value is wrapped in quotes and prefixed by "W/", ex, W/"12345".
*
* @param eTag ETag string value.
*/
private void checkValidETag(String eTag) {
if (eTag == null || ASTERISK.equals(eTag)) {
return;
}
if (!((eTag.startsWith(QUOTE_STRING) || eTag.startsWith(WEAK_ETAG_PREFIX_QUOTE))
&& eTag.endsWith(QUOTE_STRING))) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(
"The value=%s should be equal to * , be wrapped in quotes, or be wrapped in quotes prefixed by W/",
eTag)));
}
}
} |
Use the enum type here, and when performing a concatenation, the toString will be called automatically. | private String getTracingInfo(HttpRequest request) {
String track = System.getenv(RequestTracingConstants.REQUEST_TRACING_DISABLED_ENVIRONMENT_VARIABLE.toString());
if (track != null && track.equalsIgnoreCase("false")) {
return "";
}
String requestTypeValue = watchRequests ? RequestType.WATCH.toString() : RequestType.STARTUP.toString();
String tracingInfo = RequestTracingConstants.REQUEST_TYPE_KEY.toString() + "=" + requestTypeValue;
String hostType = getHostType();
if (!hostType.isEmpty()) {
tracingInfo += "," + RequestTracingConstants.HOST_TYPE_KEY + "=" + getHostType();
}
if (isDev || isKeyVaultConfigured) {
tracingInfo += ",Env=" + getEnvInfo();
}
return tracingInfo;
} | String requestTypeValue = watchRequests ? RequestType.WATCH.toString() : RequestType.STARTUP.toString(); | private String getTracingInfo(HttpRequest request) {
String track = System.getenv(RequestTracingConstants.REQUEST_TRACING_DISABLED_ENVIRONMENT_VARIABLE.toString());
if (track != null && track.equalsIgnoreCase("false")) {
return "";
}
RequestType requestTypeValue = watchRequests ? RequestType.WATCH : RequestType.STARTUP;
String tracingInfo = RequestTracingConstants.REQUEST_TYPE_KEY.toString() + "=" + requestTypeValue;
String hostType = getHostType();
if (!hostType.isEmpty()) {
tracingInfo += "," + RequestTracingConstants.HOST_TYPE_KEY + "=" + getHostType();
}
if (isDev || isKeyVaultConfigured) {
tracingInfo += ",Env=" + getEnvInfo();
}
return tracingInfo;
} | class BaseAppConfigurationPolicy implements HttpPipelinePolicy {
private static final String PACKAGE_NAME = BaseAppConfigurationPolicy.class.getPackage().getImplementationTitle();
public static final String USER_AGENT = String.format("%s/%s", StringUtils.replace(PACKAGE_NAME, " ", ""),
BaseAppConfigurationPolicy.class.getPackage().getImplementationVersion());
static Boolean watchRequests = false;
final Boolean isDev;
final Boolean isKeyVaultConfigured;
public BaseAppConfigurationPolicy(Boolean isDev, Boolean isKeyVaultConfigured) {
this.isDev = isDev;
this.isKeyVaultConfigured = isKeyVaultConfigured;
}
/**
*
* Checks if Azure App Configuration Tracing is disabled, and if not gets tracing information.
*
* @param request The http request that will be traced, used to check operation being run.
* @return String of the value for the correlation-context header.
*/
private String getEnvInfo() {
String envInfo = "";
envInfo = buildEnvTracingInfo(envInfo, isDev, DEV_ENV_TRACING);
envInfo = buildEnvTracingInfo(envInfo, isKeyVaultConfigured, KEY_VAULT_CONFIGURED_TRACING);
return envInfo;
}
private String buildEnvTracingInfo(String envInfo, Boolean check, String checkString) {
if (check) {
if (envInfo.length() > 0) {
envInfo += ",";
}
envInfo += checkString;
}
return envInfo;
}
/**
* Gets the current host machines type; Azure Function, Azure Web App, Kubernetes, or Empty.
*
* @return String of Host Type
*/
private static String getHostType() {
HostType hostType = HostType.UNIDENTIFIED;
if (System.getenv(RequestTracingConstants.AZURE_FUNCTIONS_ENVIRONMENT_VARIABLE.toString()) != null) {
hostType = HostType.AZURE_FUNCTION;
} else if (System.getenv(RequestTracingConstants.AZURE_WEB_APP_ENVIRONMENT_VARIABLE.toString()) != null) {
hostType = HostType.AZURE_WEB_APP;
} else if (System.getenv(RequestTracingConstants.KUBERNETES_ENVIRONMENT_VARIABLE.toString()) != null) {
hostType = HostType.KUBERNETES;
}
return hostType.toString();
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
String sdkUserAgent = context.getHttpRequest().getHeaders().get(USER_AGENT_TYPE).getValue();
context.getHttpRequest().getHeaders().set(USER_AGENT_TYPE, USER_AGENT + " " + sdkUserAgent);
context.getHttpRequest().getHeaders().set(RequestTracingConstants.CORRELATION_CONTEXT_HEADER.toString(),
getTracingInfo(context.getHttpRequest()));
return next.process();
}
/**
* @param watchRequests the watchRequests to set
*/
public static void setWatchRequests(Boolean watchRequests) {
BaseAppConfigurationPolicy.watchRequests = watchRequests;
}
} | class BaseAppConfigurationPolicy implements HttpPipelinePolicy {
private static final String PACKAGE_NAME = BaseAppConfigurationPolicy.class.getPackage().getImplementationTitle();
public static final String USER_AGENT = String.format("%s/%s", StringUtils.replace(PACKAGE_NAME, " ", ""),
BaseAppConfigurationPolicy.class.getPackage().getImplementationVersion());
static Boolean watchRequests = false;
final boolean isDev;
final boolean isKeyVaultConfigured;
public BaseAppConfigurationPolicy(Boolean isDev, Boolean isKeyVaultConfigured) {
this.isDev = isDev;
this.isKeyVaultConfigured = isKeyVaultConfigured;
}
/**
*
* Checks if Azure App Configuration Tracing is disabled, and if not gets tracing information.
*
* @param request The http request that will be traced, used to check operation being run.
* @return String of the value for the correlation-context header.
*/
private String getEnvInfo() {
String envInfo = "";
envInfo = buildEnvTracingInfo(envInfo, isDev, DEV_ENV_TRACING);
envInfo = buildEnvTracingInfo(envInfo, isKeyVaultConfigured, KEY_VAULT_CONFIGURED_TRACING);
return envInfo;
}
private String buildEnvTracingInfo(String envInfo, Boolean check, String checkString) {
if (check) {
if (envInfo.length() > 0) {
envInfo += ",";
}
envInfo += checkString;
}
return envInfo;
}
/**
* Gets the current host machines type; Azure Function, Azure Web App, Kubernetes, or Empty.
*
* @return String of Host Type
*/
private static String getHostType() {
HostType hostType = HostType.UNIDENTIFIED;
if (System.getenv(RequestTracingConstants.AZURE_FUNCTIONS_ENVIRONMENT_VARIABLE.toString()) != null) {
hostType = HostType.AZURE_FUNCTION;
} else if (System.getenv(RequestTracingConstants.AZURE_WEB_APP_ENVIRONMENT_VARIABLE.toString()) != null) {
hostType = HostType.AZURE_WEB_APP;
} else if (System.getenv(RequestTracingConstants.KUBERNETES_ENVIRONMENT_VARIABLE.toString()) != null) {
hostType = HostType.KUBERNETES;
}
return hostType.toString();
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
String sdkUserAgent = context.getHttpRequest().getHeaders().get(USER_AGENT_TYPE).getValue();
context.getHttpRequest().getHeaders().set(USER_AGENT_TYPE, USER_AGENT + " " + sdkUserAgent);
context.getHttpRequest().getHeaders().set(RequestTracingConstants.CORRELATION_CONTEXT_HEADER.toString(),
getTracingInfo(context.getHttpRequest()));
return next.process();
}
/**
* @param watchRequests the watchRequests to set
*/
public static void setWatchRequests(Boolean watchRequests) {
BaseAppConfigurationPolicy.watchRequests = watchRequests;
}
} |
will the beanfactory be null? | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (beanFactory != null && bean instanceof AbstractAzureHttpClientBuilderFactory) {
HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class);
if (policy != null) {
AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean;
builderFactory.addHttpPipelinePolicy(policy);
LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass());
} else {
LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass());
}
}
return bean;
} | if (beanFactory != null && bean instanceof AbstractAzureHttpClientBuilderFactory) { | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureHttpClientBuilderFactory) {
HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class);
if (policy != null) {
AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean;
builderFactory.addHttpPipelinePolicy(policy);
LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass());
} else {
LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass());
}
}
return bean;
} | class AzureHttpClientBuilderFactoryBeanPostProcessor
implements BeanPostProcessor, Ordered, BeanFactoryAware {
private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class);
public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy";
@Nullable
private BeanFactory beanFactory;
@Override
public int getOrder() {
return LOWEST_PRECEDENCE;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
} | class AzureHttpClientBuilderFactoryBeanPostProcessor
implements BeanPostProcessor, Ordered, BeanFactoryAware {
private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class);
public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy";
private BeanFactory beanFactory;
@Override
public int getOrder() {
return LOWEST_PRECEDENCE;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
} |
do we need this comment? | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) {
return next.process();
}
Span parentSpan = (Span) context.getData(PARENT_SPAN_KEY).orElse(tracer.currentSpan());
HttpRequest request = context.getHttpRequest();
final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl());
Span.Builder spanBuilder = tracer.spanBuilder().name(urlBuilder.getPath())
.setParent(parentSpan.context());
spanBuilder.kind(Span.Kind.CLIENT);
Span span = spanBuilder.start();
if (!span.isNoop()) {
addSpanRequestAttributes(span, request, context);
}
TraceContext traceContext = span.context();
if (isValid(traceContext)) {
propagator.inject(traceContext, request, contextSetter);
}
return next.process()
.doOnEach(SleuthHttpPolicy::handleResponse)
.contextWrite(Context.of("TRACING_SPAN", span, "REQUEST", request));
} | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) {
return next.process();
}
Span parentSpan = (Span) context.getData(PARENT_SPAN_KEY).orElse(tracer.currentSpan());
HttpRequest request = context.getHttpRequest();
final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl());
Span.Builder spanBuilder = tracer.spanBuilder().name(urlBuilder.getPath())
.setParent(parentSpan.context());
spanBuilder.kind(Span.Kind.CLIENT);
Span span = spanBuilder.start();
if (!span.isNoop()) {
addSpanRequestAttributes(span, request, context);
}
TraceContext traceContext = span.context();
if (isValid(traceContext)) {
propagator.inject(traceContext, request, contextSetter);
}
return next.process()
.doOnEach(SleuthHttpPolicy::handleResponse)
.contextWrite(Context.of("TRACING_SPAN", span));
} | class SleuthHttpPolicy implements HttpPipelinePolicy, Ordered {
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE;
}
private final Tracer tracer;
private final Propagator propagator;
private static final String HTTP_USER_AGENT = "http.user_agent";
private static final String HTTP_METHOD = "http.method";
private static final String HTTP_URL = "http.url";
private static final String HTTP_STATUS_CODE = "http.status_code";
private static final String REQUEST_ID = "x-ms-request-id";
private static final String AZ_NAMESPACE_KEY = "az.namespace";
public SleuthHttpPolicy(Tracer tracer, Propagator propagator) {
Assert.notNull(tracer, "tracer must not be null!");
Assert.notNull(propagator, "propagator must not be null!");
this.tracer = tracer;
this.propagator = propagator;
}
@Override
private static void addSpanRequestAttributes(Span span, HttpRequest request,
HttpPipelineCallContext context) {
putTagIfNotEmptyOrNull(span, HTTP_USER_AGENT,
request.getHeaders().getValue("User-Agent"));
putTagIfNotEmptyOrNull(span, HTTP_METHOD, request.getHttpMethod().toString());
putTagIfNotEmptyOrNull(span, HTTP_URL, request.getUrl().toString());
Optional<Object> tracingNamespace = context.getData(AZ_TRACING_NAMESPACE_KEY);
tracingNamespace.ifPresent(o -> putTagIfNotEmptyOrNull(span, AZ_NAMESPACE_KEY,
o.toString()));
}
private static void putTagIfNotEmptyOrNull(Span span, String key, String value) {
if (!CoreUtils.isNullOrEmpty(value)) {
span.tag(key, value);
}
}
/**
* Handles retrieving the information from the service response and ending the span.
*
* @param signal Reactive Stream signal fired by Reactor.
*/
private static void handleResponse(Signal<? extends HttpResponse> signal) {
if (signal.isOnComplete() || signal.isOnSubscribe()) {
return;
}
ContextView context = signal.getContextView();
Optional<Span> tracingSpan = context.getOrEmpty("TRACING_SPAN");
if (!tracingSpan.isPresent()) {
return;
}
Span span = tracingSpan.get();
HttpResponse httpResponse = null;
Throwable error = null;
if (signal.isOnNext()) {
httpResponse = signal.get();
} else {
error = signal.getThrowable();
if (error instanceof HttpResponseException) {
HttpResponseException exception = (HttpResponseException) error;
httpResponse = exception.getResponse();
}
}
spanEnd(span, httpResponse, error);
}
/**
* Sets status information and ends the span.
*
* @param span Span to end.
* @param response Response from the service.
* @param error Potential error returned from the service.
*/
private static void spanEnd(Span span, HttpResponse response, Throwable error) {
if (!span.isNoop()) {
int statusCode = 0;
String requestId = null;
if (response != null) {
statusCode = response.getStatusCode();
requestId = response.getHeaderValue(REQUEST_ID);
}
putTagIfNotEmptyOrNull(span, REQUEST_ID, requestId);
span.tag(HTTP_STATUS_CODE, String.valueOf(statusCode));
span = HttpTraceUtil.setSpanStatus(span, statusCode, error);
}
span.end();
}
private final Propagator.Setter<HttpRequest> contextSetter =
(request, key, value) -> request.getHeaders().set(key, value);
} | class SleuthHttpPolicy implements HttpPipelinePolicy {
private final Tracer tracer;
private final Propagator propagator;
private static final String HTTP_USER_AGENT = "http.user_agent";
private static final String HTTP_METHOD = "http.method";
private static final String HTTP_URL = "http.url";
private static final String HTTP_STATUS_CODE = "http.status_code";
private static final String REQUEST_ID = "x-ms-request-id";
private static final String AZ_NAMESPACE_KEY = "az.namespace";
public SleuthHttpPolicy(Tracer tracer, Propagator propagator) {
Assert.notNull(tracer, "tracer must not be null!");
Assert.notNull(propagator, "propagator must not be null!");
this.tracer = tracer;
this.propagator = propagator;
}
@Override
// Tags the span with request metadata: user agent, HTTP method, URL and, when
// present in the pipeline context, the Azure service namespace.
private static void addSpanRequestAttributes(Span span, HttpRequest request,
    HttpPipelineCallContext context) {
    String userAgent = request.getHeaders().getValue("User-Agent");
    putTagIfNotEmptyOrNull(span, HTTP_USER_AGENT, userAgent);
    putTagIfNotEmptyOrNull(span, HTTP_METHOD, request.getHttpMethod().toString());
    putTagIfNotEmptyOrNull(span, HTTP_URL, request.getUrl().toString());
    context.getData(AZ_TRACING_NAMESPACE_KEY)
        .ifPresent(namespace -> putTagIfNotEmptyOrNull(span, AZ_NAMESPACE_KEY, namespace.toString()));
}
// Adds the tag only when the value carries actual content.
private static void putTagIfNotEmptyOrNull(Span span, String key, String value) {
    if (CoreUtils.isNullOrEmpty(value)) {
        return;
    }
    span.tag(key, value);
}
/**
 * Terminal hook for the reactive pipeline: extracts the response (or error)
 * carried by the signal and completes the span stored in the subscriber context.
 *
 * @param signal Reactive Stream signal fired by Reactor.
 */
private static void handleResponse(Signal<? extends HttpResponse> signal) {
    // Only onNext/onError carry information worth recording.
    if (signal.isOnComplete() || signal.isOnSubscribe()) {
        return;
    }
    Optional<Span> tracingSpan = signal.getContextView().getOrEmpty("TRACING_SPAN");
    if (!tracingSpan.isPresent()) {
        return;
    }
    HttpResponse httpResponse = null;
    Throwable error = null;
    if (signal.isOnNext()) {
        httpResponse = signal.get();
    } else {
        error = signal.getThrowable();
        if (error instanceof HttpResponseException) {
            // Service errors still expose the underlying HTTP response.
            httpResponse = ((HttpResponseException) error).getResponse();
        }
    }
    spanEnd(tracingSpan.get(), httpResponse, error);
}
/**
 * Sets status information and ends the span.
 *
 * @param span Span to end.
 * @param response Response from the service.
 * @param error Potential error returned from the service.
 */
private static void spanEnd(Span span, HttpResponse response, Throwable error) {
    // No-op spans record nothing, so skip tagging entirely and only end them.
    if (!span.isNoop()) {
        // statusCode 0 signals that no HTTP response was received.
        int statusCode = 0;
        String requestId = null;
        if (response != null) {
            statusCode = response.getStatusCode();
            requestId = response.getHeaderValue(REQUEST_ID);
        }
        putTagIfNotEmptyOrNull(span, REQUEST_ID, requestId);
        span.tag(HTTP_STATUS_CODE, String.valueOf(statusCode));
        // setSpanStatus returns the span with the status applied; end that instance.
        span = HttpTraceUtil.setSpanStatus(span, statusCode, error);
    }
    span.end();
}
private final Propagator.Setter<HttpRequest> contextSetter =
(request, key, value) -> request.getHeaders().set(key, value);
} | |
Due to @Nullable, it can be removed too. | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (beanFactory != null && bean instanceof AbstractAzureHttpClientBuilderFactory) {
HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class);
if (policy != null) {
AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean;
builderFactory.addHttpPipelinePolicy(policy);
LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass());
} else {
LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass());
}
}
return bean;
} | if (beanFactory != null && bean instanceof AbstractAzureHttpClientBuilderFactory) { | public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AbstractAzureHttpClientBuilderFactory) {
HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class);
if (policy != null) {
AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean;
builderFactory.addHttpPipelinePolicy(policy);
LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass());
} else {
LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass());
}
}
return bean;
} | class AzureHttpClientBuilderFactoryBeanPostProcessor
implements BeanPostProcessor, Ordered, BeanFactoryAware {
private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class);
public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy";
@Nullable
private BeanFactory beanFactory;
@Override
public int getOrder() {
    // Lowest precedence: this post-processor runs after the others.
    return LOWEST_PRECEDENCE;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
    // Injected by the container (BeanFactoryAware); later used to look up the
    // Sleuth HTTP pipeline policy bean.
    this.beanFactory = beanFactory;
}
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
} | class AzureHttpClientBuilderFactoryBeanPostProcessor
implements BeanPostProcessor, Ordered, BeanFactoryAware {
private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class);
public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy";
private BeanFactory beanFactory;
@Override
public int getOrder() {
return LOWEST_PRECEDENCE;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
} |
Shouldn't we do the same thing for 409s (ResourceAlreadyExists) as well? | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | if (statusCode == HttpConstants.StatusCodes.NOTFOUND) { | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
// Underlying tracer; tracing is disabled when this is null (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Serializes diagnostics objects into JSON strings attached as event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is stored on span events.
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
// Context marker used to suppress tracing of nested SDK calls.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to the span only when the operation's duration
// exceeds these thresholds (CRUD vs. query operations).
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
// Creates the provider; a null tracer disables all tracing (see isEnabled()).
public TracerProvider(Tracer tracer) {
    this.tracer = tracer;
}
// Tracing is considered enabled only when a tracer implementation was supplied.
public boolean isEnabled() {
    return tracer != null;
}
/**
 * Starts a new tracing span for the given operation and decorates it with
 * Cosmos-specific attributes (database, endpoint, statement).
 * <p>
 * The {@code context} will be checked for containing information about a parent
 * span. If a parent span is found the new span will be added as a child,
 * otherwise the span will be created and added to the context and any
 * downstream start calls will use the created span as the parent.
 *
 * @param methodName name of the operation; also recorded as the db statement.
 * @param databaseId database identifier; not recorded when {@code null}.
 * @param endpoint service endpoint recorded as the db URL.
 * @param context additional metadata that is passed through the call stack.
 * @return an updated context object carrying the started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Context spanContext = tracer.start(methodName,
        context.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME));
    if (databaseId != null) {
        tracer.setAttribute(DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(DB_URL, endpoint, spanContext);
    tracer.setAttribute(DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code name} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
    // Delegates directly to the underlying tracer implementation.
    tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span held in {@code context}, deriving the final
 * status from the given reactive {@link Signal}.
 *
 * @param context additional metadata that is passed through the call stack.
 * @param signal the signal whose type and error determine how the span ends.
 * @param statusCode status code to record; replaced by the exception's status
 * code when the signal carries a {@code CosmosException}.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    if (signal.isOnComplete()) {
        end(statusCode, null, context);
    } else if (signal.isOnError()) {
        Throwable throwable = null;
        if (signal.hasError()) {
            throwable = signal.getThrowable();
            if (throwable instanceof CosmosException) {
                // Prefer the service-reported status code over the supplied one.
                statusCode = ((CosmosException) throwable).getStatusCode();
            }
        }
        end(statusCode, throwable, context);
    }
    // Other signal types (onNext/onSubscribe) leave the span untouched.
}
// Wraps a CosmosResponse publisher with tracing only (no client telemetry),
// using the default CRUD diagnostics threshold.
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
// Traces a transactional-batch publisher; when client telemetry is enabled,
// success/error signals additionally feed latency and request-charge metrics.
// Passing null as the diagnostics threshold selects the default CRUD threshold.
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                    Context context,
                                                                    String spanName,
                                                                    String containerId,
                                                                    String databaseId,
                                                                    CosmosAsyncClient client,
                                                                    ConsistencyLevel consistencyLevel,
                                                                    OperationType operationType,
                                                                    ResourceType resourceType) {
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosBatchResponse::getStatusCode,
        CosmosBatchResponse::getDiagnostics,
        null);
}
// Traces an item-response publisher; when client telemetry is enabled,
// success/error signals additionally feed latency and request-charge metrics.
// thresholdForDiagnosticsOnTracer overrides the duration above which
// diagnostics are attached to the span as events (null = default CRUD threshold).
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                               Context context,
                                                                               String spanName,
                                                                               String containerId,
                                                                               String databaseId,
                                                                               CosmosAsyncClient client,
                                                                               ConsistencyLevel consistencyLevel,
                                                                               OperationType operationType,
                                                                               ResourceType resourceType,
                                                                               Duration thresholdForDiagnosticsOnTracer) {
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosItemResponse::getStatusCode,
        CosmosItemResponse::getDiagnostics,
        thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps {@code resultPublisher} so that a tracing span is opened on subscribe
 * and closed on success or error. Nested SDK calls (marked via
 * {@code COSMOS_CALL_DEPTH} in the context) are not traced again.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata; may carry the nested-call marker.
 * @param spanName name of the span to start.
 * @param databaseId database id recorded on the span, may be null.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from the result.
 * @param diagnosticFunc extracts the diagnostics from the result.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics are
 * attached as span events; defaults to CRUD_THRESHOLD_FOR_DIAGNOSTICS when null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    // Captured at subscription time so doOnSuccess/doOnError can close the
    // span that doOnSubscribe opened.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    // Only attach (potentially large) diagnostics for slow operations.
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // Fix: the original call passed ex.getMessage() without a '{}'
                    // placeholder, so SLF4J silently dropped the argument.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
// Combines span tracing with client-telemetry collection: the result publisher
// is first instrumented by traceEnabledPublisher, then success/error signals
// feed latency and request-charge telemetry when the feature is enabled.
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Only item and batch responses carry the data needed for telemetry.
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
                @SuppressWarnings("unchecked")
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown for failures, hence null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
// Records one operation into the client-telemetry histograms: a latency sample
// (in microseconds) and a request-charge sample, each keyed by a ReportPayload
// bucket. Histograms are created lazily on first use for a given bucket.
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram != null) {
        // Duration recorded in microseconds (nanos / 1000).
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
    } else {
        // Successful operations use a different histogram precision than failures.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram != null) {
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
    } else {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
}
// Builds the telemetry payload that identifies one (operation, resource,
// status, consistency, ...) bucket for the given metric name/unit.
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's configured consistency level when none was supplied.
    ConsistencyLevel effectiveConsistency = consistencyLevel != null
        ? consistencyLevel
        : BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
// Attaches the pieces of CosmosDiagnostics (store responses, gateway statistics,
// retry context, address resolution, serialization timings, region/system/client
// information) to the current span as timestamped JSON events.
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes = null;
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() can throw for failed requests; the timeline is then on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
            , ZoneOffset.UTC);
        if (eventIterator != null) {
            // Prefer the "created" timeline event as the event timestamp when available.
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
            ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            Iterator<RequestTimeline.Event> eventIterator =
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            Iterator<RequestTimeline.Event> eventIterator =
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // The remaining events all use the overall request start time as timestamp.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
/**
 * Wraps {@code resultPublisher} with tracing and, when client telemetry is enabled,
 * records latency/request-charge telemetry for item and batch responses as well as
 * for terminal {@link CosmosException} errors.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata carrying the (potential) parent span.
 * @param spanName name used for the tracing span.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported in tracing and telemetry.
 * @param endpoint service endpoint reported on the span.
 * @param client the client whose telemetry store receives the data points.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation kind reported in telemetry.
 * @param resourceType resource kind reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are
 *        attached to the span; {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    // Layer tracing first, then telemetry, so telemetry fires for both success and error
    // signals of the traced publisher.
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Fix: evaluate the telemetry switch once per signal; the original re-evaluated
            // Configs.isClientTelemetryEnabled(...) in each instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // No @SuppressWarnings needed: this cast is not an unchecked generic cast.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Errors carry no payload, hence objectSize is null.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records one operation's latency and request charge into the client-telemetry
 * histograms, creating each histogram lazily the first time its report-payload key
 * is seen.
 *
 * @param cosmosAsyncClient client whose telemetry store is updated.
 * @param cosmosDiagnostics diagnostics supplying the operation duration.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown (errors).
 * @param containerId container name for the report payload.
 * @param databaseId database name for the report payload.
 * @param operationType operation kind for the report payload.
 * @param resourceType resource kind for the report payload.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram == null) {
        // Success and failure latencies are tracked with different precisions.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(
                ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(
                ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    // Fix (DRY): record once after the get-or-create, instead of duplicating the
    // recordValue call in both branches. getDuration() is nanoseconds; the histogram
    // is denominated in microseconds (see REQUEST_LATENCY_MAX_MICRO_SEC).
    ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos() / 1000);

    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
        ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram == null) {
        requestChargeHistogram = new ConcurrentDoubleHistogram(
            ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds one telemetry report row describing the outcome of a single operation.
 *
 * @param cosmosAsyncClient used to resolve the client-default consistency level.
 * @param cosmosDiagnostics supplies the contacted regions.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown.
 * @param containerId container name.
 * @param databaseId database name.
 * @param operationType operation kind.
 * @param resourceType resource kind.
 * @param consistencyLevel explicit consistency level, or {@code null} to fall back
 *        to the client's configured level.
 * @param metricsName metric identifier (latency or request charge).
 * @param unitName unit of the metric.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    if (consistencyLevel == null) {
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    } else {
        payload.setConsistency(consistencyLevel);
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Serializes the given diagnostics into JSON attributes and attaches them to the span in
 * {@code context} as timed events: store responses, supplemental store responses, gateway
 * statistics, retry context, address resolution, serialization diagnostics, and finally
 * regions contacted / system information / client configuration.
 *
 * @param cosmosDiagnostics diagnostics to record; no-op when {@code null}.
 * @param context call metadata identifying the span the events are attached to.
 * @throws JsonProcessingException if a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;

    // Direct-mode store responses.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
            clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator = DirectBridgeInternal
                    .getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() rethrows failed responses; their timeline lives on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (eventIterator != null) {
            requestStartTime = findCreatedEventTime(eventIterator, requestStartTime);
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Supplemental (capped) store responses.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
            ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(
                clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = findCreatedEventTime(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Gateway-mode statistics.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = findCreatedEventTime(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }

    // Retry context, if any retries happened.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    // Address resolution calls.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
            clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    // Serialization diagnostics.
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
                clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // Summary events, all stamped with the overall request start time.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time;
 * returns {@code fallback} when no such event exists. Extracted because the original
 * method repeated this scan verbatim in three places.
 */
private static OffsetDateTime findCreatedEventTime(Iterator<RequestTimeline.Event> eventIterator,
                                                   OffsetDateTime fallback) {
    while (eventIterator.hasNext()) {
        RequestTimeline.Event event = eventIterator.next();
        // Null-safe comparison; the original called event.getName().equals("created").
        if ("created".equals(event.getName())) {
            return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
        }
    }
    return fallback;
}
} |
Please only remove the callstack etc. for 404 with SubStatusCode 0 - for any SubStatusCode != 0 (like ReadSessionNotAvailable 1002 etc.) we should leave the callstack | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | if (statusCode == HttpConstants.StatusCodes.NOTFOUND) { | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
// Underlying tracer implementation; null means tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared JSON mapper used to serialize diagnostics into span-event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// Span attribute names/values following the db.* tracing conventions.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
// Context key used to detect nested SDK calls so only the outermost call creates a span.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a call terminates with an error signal.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Latency thresholds above which full diagnostics are attached to the span.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider backed by the given tracer; passing {@code null} disables tracing.
 */
public TracerProvider(Tracer tracer) {
    this.tracer = tracer;
}
/**
 * Returns {@code true} when a tracer implementation was supplied, i.e. tracing is active.
 */
public boolean isEnabled() {
    return tracer != null;
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param methodName name of the SDK operation; used as the span name and db.statement attribute.
 * @param databaseId database name set as db.instance, or {@code null} to omit the attribute.
 * @param endpoint service endpoint set as db.url.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object.
 * @throws NullPointerException if {@code context} is {@code null}.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Context local = Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the span with the resource-provider namespace before starting it.
    local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
    local = tracer.start(methodName, local);
    if (databaseId != null) {
        tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
    }
    tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
    tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
    tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
    return local;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 * <p>NOTE(review): dereferences {@code tracer} without a null check — callers must ensure
 * {@link #isEnabled()} is {@code true} before calling.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
    tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span carried in {@code context}, deriving the final status from the
 * terminal {@link Signal}. For error signals wrapping a {@link CosmosException}, the
 * exception's own status code replaces the supplied one.
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 * @param statusCode status code to report unless overridden by a CosmosException.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service-reported status code over the caller-supplied one.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            // Other signal types (e.g. ON_NEXT) do not terminate the span.
            break;
    }
}
/**
 * Instruments a CRUD-style response publisher with tracing: status code and diagnostics
 * are read straight off the response, and the default (null) diagnostics threshold applies.
 *
 * @param resultPublisher publisher to instrument.
 * @param context call metadata carrying the (potential) parent span.
 * @param spanName name of the tracing span.
 * @param databaseId database name reported on the span, or {@code null}.
 * @param endpoint service endpoint reported on the span.
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a batch-response publisher with tracing and client telemetry; uses the
 * default (null) diagnostics threshold.
 *
 * @param resultPublisher publisher to instrument.
 * @param context call metadata carrying the (potential) parent span.
 * @param spanName name of the tracing span.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported on the span and in telemetry.
 * @param client client whose endpoint and telemetry store are used.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation kind reported in telemetry.
 * @param resourceType resource kind reported in telemetry.
 * @return the instrumented publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                    Context context,
                                                                    String spanName,
                                                                    String containerId,
                                                                    String databaseId,
                                                                    CosmosAsyncClient client,
                                                                    ConsistencyLevel consistencyLevel,
                                                                    OperationType operationType,
                                                                    ResourceType resourceType) {
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosBatchResponse::getStatusCode,
        CosmosBatchResponse::getDiagnostics,
        null);
}
/**
 * Instruments an item-response publisher with tracing and client telemetry.
 *
 * @param resultPublisher publisher to instrument.
 * @param context call metadata carrying the (potential) parent span.
 * @param spanName name of the tracing span.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported on the span and in telemetry.
 * @param client client whose endpoint and telemetry store are used.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation kind reported in telemetry.
 * @param resourceType resource kind reported in telemetry.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are
 *        attached to the span; {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                               Context context,
                                                                               String spanName,
                                                                               String containerId,
                                                                               String databaseId,
                                                                               CosmosAsyncClient client,
                                                                               ConsistencyLevel consistencyLevel,
                                                                               OperationType operationType,
                                                                               ResourceType resourceType,
                                                                               Duration thresholdForDiagnosticsOnTracer) {
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosItemResponse::getStatusCode,
        CosmosItemResponse::getDiagnostics,
        thresholdForDiagnosticsOnTracer);
}
/**
 * Instruments a publisher with tracing: the span starts on subscription (only for the
 * outermost, non-nested SDK call) and ends on the terminal signal. When the operation's
 * duration exceeds the threshold, the full diagnostics are attached to the span as events.
 *
 * @param resultPublisher publisher to instrument.
 * @param context call metadata; COSMOS_CALL_DEPTH marks nested calls that must not span.
 * @param spanName name of the tracing span.
 * @param databaseId database name reported on the span, or {@code null}.
 * @param endpoint service endpoint reported on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold; {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    // The span context is created inside doOnSubscribe; the AtomicReference hands it
    // over to the terminal handlers.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // Fix: the original passed ex.getMessage() without a '{}' placeholder,
                    // so SLF4J silently dropped the argument.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Wraps {@code resultPublisher} with tracing and, when client telemetry is enabled,
 * records latency/request-charge telemetry for item and batch responses as well as
 * for terminal {@link CosmosException} errors.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata carrying the (potential) parent span.
 * @param spanName name used for the tracing span.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported in tracing and telemetry.
 * @param endpoint service endpoint reported on the span.
 * @param client the client whose telemetry store receives the data points.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation kind reported in telemetry.
 * @param resourceType resource kind reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are
 *        attached to the span; {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    // Layer tracing first, then telemetry, so telemetry fires for both success and error
    // signals of the traced publisher.
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Fix: evaluate the telemetry switch once per signal; the original re-evaluated
            // Configs.isClientTelemetryEnabled(...) in each instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // No @SuppressWarnings needed: this cast is not an unchecked generic cast.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Errors carry no payload, hence objectSize is null.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records one operation's latency and request charge into the client-telemetry
 * histograms, creating each histogram lazily the first time its report-payload key
 * is seen.
 *
 * @param cosmosAsyncClient client whose telemetry store is updated.
 * @param cosmosDiagnostics diagnostics supplying the operation duration.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown (errors).
 * @param containerId container name for the report payload.
 * @param databaseId database name for the report payload.
 * @param operationType operation kind for the report payload.
 * @param resourceType resource kind for the report payload.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram == null) {
        // Success and failure latencies are tracked with different precisions.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(
                ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(
                ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    // Fix (DRY): record once after the get-or-create, instead of duplicating the
    // recordValue call in both branches. getDuration() is nanoseconds; the histogram
    // is denominated in microseconds (see REQUEST_LATENCY_MAX_MICRO_SEC).
    ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos() / 1000);

    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
        ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram == null) {
        requestChargeHistogram = new ConcurrentDoubleHistogram(
            ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds one telemetry report row describing the outcome of a single operation.
 *
 * @param cosmosAsyncClient used to resolve the client-default consistency level.
 * @param cosmosDiagnostics supplies the contacted regions.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown.
 * @param containerId container name.
 * @param databaseId database name.
 * @param operationType operation kind.
 * @param resourceType resource kind.
 * @param consistencyLevel explicit consistency level, or {@code null} to fall back
 *        to the client's configured level.
 * @param metricsName metric identifier (latency or request charge).
 * @param unitName unit of the metric.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    if (consistencyLevel == null) {
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    } else {
        payload.setConsistency(consistencyLevel);
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Serializes the given diagnostics into JSON attributes and attaches them to the span in
 * {@code context} as timed events: store responses, supplemental store responses, gateway
 * statistics, retry context, address resolution, serialization diagnostics, and finally
 * regions contacted / system information / client configuration.
 *
 * @param cosmosDiagnostics diagnostics to record; no-op when {@code null}.
 * @param context call metadata identifying the span the events are attached to.
 * @throws JsonProcessingException if a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;

    // Direct-mode store responses.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
            clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator = DirectBridgeInternal
                    .getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() rethrows failed responses; their timeline lives on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (eventIterator != null) {
            requestStartTime = findCreatedEventTime(eventIterator, requestStartTime);
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Supplemental (capped) store responses.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
            ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(
                clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = findCreatedEventTime(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Gateway-mode statistics.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = findCreatedEventTime(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }

    // Retry context, if any retries happened.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    // Address resolution calls.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
            clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    // Serialization diagnostics.
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
                clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // Summary events, all stamped with the overall request start time.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time;
 * returns {@code fallback} when no such event exists. Extracted because the original
 * method repeated this scan verbatim in three places.
 */
private static OffsetDateTime findCreatedEventTime(Iterator<RequestTimeline.Event> eventIterator,
                                                   OffsetDateTime fallback) {
    while (eventIterator.hasNext()) {
        RequestTimeline.Event event = eventIterator.next();
        // Null-safe comparison; the original called event.getName().equals("created").
        if ("created".equals(event.getName())) {
            return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
        }
    }
    return fallback;
}
} | class TracerProvider {
// Underlying tracer implementation; null means tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared JSON mapper used to serialize diagnostics into span-event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// Span attribute names/values following the db.* tracing conventions.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context key used to detect nested SDK calls so only the outermost call creates a span.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a call terminates with an error signal.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Latency thresholds above which full diagnostics are attached to the span.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider backed by the given tracer; passing {@code null} disables tracing.
 */
public TracerProvider(Tracer tracer) {
    this.tracer = tracer;
}
/**
 * Returns {@code true} when a tracer implementation was supplied, i.e. tracing is active.
 */
public boolean isEnabled() {
    return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    // A null context is a programming error; fail fast with the tracer contract's message.
    Context spanContext = Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the Azure resource-provider namespace, then open the span itself.
    spanContext = tracer.start(methodName, spanContext.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME));
    if (databaseId != null) {
        tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(TracerProvider.DB_URL, endpoint, spanContext);
    // The method name doubles as the db.statement attribute.
    tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
    // Thin delegate to the configured tracer.
    // NOTE(review): no null guard on 'tracer' here — callers appear to gate on isEnabled(); confirm.
    tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_ERROR:
            // Use the throwable only when the signal actually carries one, and prefer the
            // status code reported by a CosmosException over the caller-supplied value.
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        default:
            // Non-terminal signals leave the span open.
            break;
    }
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // No per-call diagnostics threshold: the default CRUD threshold applies downstream.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Wraps a transactional-batch publisher with tracing and, when enabled, client telemetry
 * (latency / request charge) reporting.
 *
 * @param resultPublisher the batch response publisher to instrument.
 * @param context call metadata that may carry a parent span.
 * @param spanName name of the tracing span.
 * @param containerId container targeted by the batch.
 * @param databaseId database targeted by the batch.
 * @param client client used to resolve the service endpoint and telemetry settings.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @return the instrumented publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                    Context context,
                                                                    String spanName,
                                                                    String containerId,
                                                                    String databaseId,
                                                                    CosmosAsyncClient client,
                                                                    ConsistencyLevel consistencyLevel,
                                                                    OperationType operationType,
                                                                    ResourceType resourceType) {
    // Null threshold -> the default CRUD diagnostics threshold applies downstream.
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosBatchResponse::getStatusCode,
        CosmosBatchResponse::getDiagnostics,
        null);
}
/**
 * Wraps an item-operation publisher with tracing and, when enabled, client telemetry
 * (latency / request charge) reporting.
 *
 * @param resultPublisher the item response publisher to instrument.
 * @param context call metadata that may carry a parent span.
 * @param spanName name of the tracing span.
 * @param containerId container targeted by the operation.
 * @param databaseId database targeted by the operation.
 * @param client client used to resolve the service endpoint and telemetry settings.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics are attached as span
 *        events; the CRUD default applies downstream when null.
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                               Context context,
                                                                               String spanName,
                                                                               String containerId,
                                                                               String databaseId,
                                                                               CosmosAsyncClient client,
                                                                               ConsistencyLevel consistencyLevel,
                                                                               OperationType operationType,
                                                                               ResourceType resourceType,
                                                                               Duration thresholdForDiagnosticsOnTracer) {
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosItemResponse::getStatusCode,
        CosmosItemResponse::getDiagnostics,
        thresholdForDiagnosticsOnTracer);
}
/**
 * Decorates {@code resultPublisher} with span lifecycle management: a span is started on
 * subscribe, enriched with diagnostics (when their duration exceeds the threshold) and ended on
 * success, or ended with an error status on failure. Calls flagged as nested via
 * {@code COSMOS_CALL_DEPTH} in the context are not traced again.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; may carry a parent span and the nesting marker.
 * @param spanName name used for the span.
 * @param databaseId database id attribute, may be null.
 * @param endpoint service endpoint attribute.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics become span events;
 *        defaults to {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS} when null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // Fix: the format string had no '{}' placeholder, so SLF4J silently
                    // dropped ex.getMessage(); include it in the logged message.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry reporting on top of the traced publisher: on success the operation's
 * latency and request charge are recorded; CosmosException failures are recorded with their
 * error status. Non-Cosmos failures and unknown response types are ignored by telemetry.
 *
 * @param resultPublisher publisher to instrument.
 * @param context call metadata, forwarded to the tracing decorator.
 * @param spanName name of the tracing span.
 * @param containerId container reported to telemetry.
 * @param databaseId database reported to telemetry.
 * @param endpoint service endpoint attribute for the span.
 * @param client client whose telemetry settings and histograms are used.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer diagnostics threshold forwarded to the tracer.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once instead of once per instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Plain cast; no unchecked conversion here (CosmosBatchResponse is not generic),
                // so the previous @SuppressWarnings("unchecked") was unnecessary.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records the latency and request charge of one completed operation into the client-telemetry
 * histograms, lazily creating an auto-resizing histogram the first time a given
 * {@link ReportPayload} key is seen.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the values.
 * @param cosmosDiagnostics diagnostics providing the operation duration.
 * @param statusCode response (or error) status code.
 * @param objectSize payload length in bytes, may be null (e.g. on errors).
 * @param containerId container reported to telemetry.
 * @param databaseId database reported to telemetry.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param consistencyLevel effective consistency level, may be null.
 * @param requestCharge request charge (RU) of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram != null) {
        // Latency is recorded in microseconds (nanos / 1000).
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
    } else {
        // First observation for this payload key: success and failure use different precisions.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram != null) {
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
    } else {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
}
/**
 * Builds the telemetry key/payload describing one operation (used as the histogram map key).
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's configured consistency when none was supplied for this call.
    payload.setConsistency(consistencyLevel != null
        ? consistencyLevel
        : BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    if (objectSize != null) {
        // Telemetry only tracks whether the payload crossed the 1 KB boundary.
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the pieces of {@link CosmosDiagnostics} to the current span as timed events, each
 * carrying the statistics serialized to JSON under the "JSON" attribute key. Event timestamps
 * prefer the "created" entry of the request timeline when one is available.
 *
 * @param cosmosDiagnostics diagnostics of the completed operation; no-op when null.
 * @param context call metadata carrying the span the events are attached to.
 * @throws JsonProcessingException if any statistics object cannot be serialized.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    // Direct-mode store responses.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() surfaces failed requests as CosmosException; its timeline is still usable.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes,
            requestStartTimeFromTimeline(eventIterator,
                OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC)),
            context);
    }
    diagnosticsCounter = 1;
    // Supplemental store responses, capped by the statistics helper.
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = statistics.getStoreResult() == null
            ? null
            : DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes,
            requestStartTimeFromTimeline(eventIterator,
                OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC)),
            context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator =
            clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() == null
                ? null
                : clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
        this.addEvent("GatewayStatistics", attributes,
            requestStartTimeFromTimeline(eventIterator,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC)),
            context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time; falls back to the
 * supplied timestamp when the iterator is null or no such event exists. Extracted because this
 * lookup was duplicated three times above.
 */
private static OffsetDateTime requestStartTimeFromTimeline(Iterator<RequestTimeline.Event> eventIterator,
                                                           OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            // "created".equals(...) also tolerates a null event name (the old code would NPE).
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} |
> If we remove ERROR_TYPE/ERROR_MSG then we won't be seeing the attributes below, which was the initial requirement from @trask — this was probably before I understood the OTel semantic convention for exceptions 😅 @lmolkova is correct that `recordException()` is the right way to attach the exception data | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
// Creates a provider around the given tracer; a null tracer disables all tracing (see isEnabled()).
public TracerProvider(Tracer tracer) {
    this.tracer = tracer;
}
/**
 * @return {@code true} when a tracer implementation was supplied, i.e. spans and events will be emitted.
 */
public boolean isEnabled() {
    return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Context local = Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the Azure resource-provider namespace before starting the span.
    local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
    local = tracer.start(methodName, local);
    if (databaseId != null) {
        tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
    }
    tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
    tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
    // The method name doubles as the db.statement attribute.
    tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
    return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
    // Thin delegate to the configured tracer.
    // NOTE(review): no null guard on 'tracer' here — callers appear to gate on isEnabled(); confirm.
    tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            Throwable throwable = null;
            if (signal.hasError()) {
                // Prefer the status code carried by a CosmosException over the caller-supplied one.
                throwable = signal.getThrowable();
                if (throwable instanceof CosmosException) {
                    CosmosException exception = (CosmosException) throwable;
                    statusCode = exception.getStatusCode();
                }
            }
            end(statusCode, throwable, context);
            break;
        default:
            // Non-terminal signals leave the span open.
            break;
    }
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // No per-call diagnostics threshold: the default CRUD threshold applies downstream.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Wraps a transactional-batch publisher with tracing and, when enabled, client telemetry
 * (latency / request charge) reporting.
 *
 * @param resultPublisher the batch response publisher to instrument.
 * @param context call metadata that may carry a parent span.
 * @param spanName name of the tracing span.
 * @param containerId container targeted by the batch.
 * @param databaseId database targeted by the batch.
 * @param client client used to resolve the service endpoint and telemetry settings.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @return the instrumented publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                    Context context,
                                                                    String spanName,
                                                                    String containerId,
                                                                    String databaseId,
                                                                    CosmosAsyncClient client,
                                                                    ConsistencyLevel consistencyLevel,
                                                                    OperationType operationType,
                                                                    ResourceType resourceType) {
    // Null threshold -> the default CRUD diagnostics threshold applies downstream.
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosBatchResponse::getStatusCode,
        CosmosBatchResponse::getDiagnostics,
        null);
}
/**
 * Wraps an item-operation publisher with tracing and, when enabled, client telemetry
 * (latency / request charge) reporting.
 *
 * @param resultPublisher the item response publisher to instrument.
 * @param context call metadata that may carry a parent span.
 * @param spanName name of the tracing span.
 * @param containerId container targeted by the operation.
 * @param databaseId database targeted by the operation.
 * @param client client used to resolve the service endpoint and telemetry settings.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics are attached as span
 *        events; the CRUD default applies downstream when null.
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                               Context context,
                                                                               String spanName,
                                                                               String containerId,
                                                                               String databaseId,
                                                                               CosmosAsyncClient client,
                                                                               ConsistencyLevel consistencyLevel,
                                                                               OperationType operationType,
                                                                               ResourceType resourceType,
                                                                               Duration thresholdForDiagnosticsOnTracer) {
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        BridgeInternal.getServiceEndpoint(client),
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosItemResponse::getStatusCode,
        CosmosItemResponse::getDiagnostics,
        thresholdForDiagnosticsOnTracer);
}
/**
 * Decorates {@code resultPublisher} with span lifecycle management: a span is started on
 * subscribe, enriched with diagnostics (when their duration exceeds the threshold) and ended on
 * success, or ended with an error status on failure. Calls flagged as nested via
 * {@code COSMOS_CALL_DEPTH} in the context are not traced again.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; may carry a parent span and the nesting marker.
 * @param spanName name used for the span.
 * @param databaseId database id attribute, may be null.
 * @param endpoint service endpoint attribute.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics become span events;
 *        defaults to {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS} when null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // Fix: the format string had no '{}' placeholder, so SLF4J silently
                    // dropped ex.getMessage(); include it in the logged message.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry reporting on top of the traced publisher: on success the operation's
 * latency and request charge are recorded; CosmosException failures are recorded with their
 * error status. Non-Cosmos failures and unknown response types are ignored by telemetry.
 *
 * @param resultPublisher publisher to instrument.
 * @param context call metadata, forwarded to the tracing decorator.
 * @param spanName name of the tracing span.
 * @param containerId container reported to telemetry.
 * @param databaseId database reported to telemetry.
 * @param endpoint service endpoint attribute for the span.
 * @param client client whose telemetry settings and histograms are used.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer diagnostics threshold forwarded to the tracer.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once instead of once per instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Plain cast; no unchecked conversion here (CosmosBatchResponse is not generic),
                // so the previous @SuppressWarnings("unchecked") was unnecessary.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records the latency and request charge of one completed operation into the client-telemetry
 * histograms, lazily creating an auto-resizing histogram the first time a given
 * {@link ReportPayload} key is seen.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the values.
 * @param cosmosDiagnostics diagnostics providing the operation duration.
 * @param statusCode response (or error) status code.
 * @param objectSize payload length in bytes, may be null (e.g. on errors).
 * @param containerId container reported to telemetry.
 * @param databaseId database reported to telemetry.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param consistencyLevel effective consistency level, may be null.
 * @param requestCharge request charge (RU) of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram != null) {
        // Latency is recorded in microseconds (nanos / 1000).
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
    } else {
        // First observation for this payload key: success and failure use different precisions.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram != null) {
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
    } else {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
}
/**
 * Builds the telemetry key/payload describing one operation (used as the histogram map key).
 *
 * @param metricsName metric name (latency or request charge).
 * @param unitName unit of the metric.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
    reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's configured consistency when none was supplied for this call.
    reportPayload.setConsistency(consistencyLevel == null ?
        BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
        consistencyLevel);
    if (objectSize != null) {
        // Telemetry only tracks whether the payload crossed the 1 KB boundary.
        reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    reportPayload.setDatabaseName(databaseId);
    reportPayload.setContainerName(containerId);
    reportPayload.setOperation(operationType);
    reportPayload.setResource(resourceType);
    reportPayload.setStatusCode(statusCode);
    return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} |
And 412 | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | if (statusCode == HttpConstants.StatusCodes.NOTFOUND) { | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
/**
 * Wraps {@code resultPublisher} with tracing (via {@code traceEnabledPublisher}) and, when client
 * telemetry is enabled, records latency/request-charge metrics for item and batch responses as
 * well as for failures surfaced as {@link CosmosException}.
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata carrying any parent span / nested-call marker.
 * @param spanName name for the tracing span.
 * @param containerId container the operation targets.
 * @param databaseId database the operation targets.
 * @param endpoint service endpoint recorded on the span.
 * @param client the async client; used for the telemetry switch and consistency defaulting.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType the Cosmos operation type recorded in telemetry.
 * @param resourceType the Cosmos resource type recorded in telemetry.
 * @param statusCodeFunc extracts the status code from the emitted response.
 * @param diagnosticFunc extracts the diagnostics from the emitted response.
 * @param thresholdForDiagnosticsOnTracer per-call diagnostics threshold; null uses the default.
 * @return the decorated publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Cast to a non-generic type is checked; no @SuppressWarnings needed here.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown on the failure path, hence null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records latency and request-charge samples for one completed operation into the
 * client-telemetry histograms, lazily creating each histogram on first use of its payload key.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the samples.
 * @param cosmosDiagnostics diagnostics of the operation; its duration supplies the latency sample.
 * @param statusCode service status code; selects the latency histogram precision.
 * @param objectSize payload size in bytes, or null when unknown (failure path).
 * @param containerId container the operation targeted.
 * @param databaseId database the operation targeted.
 * @param operationType the Cosmos operation type.
 * @param resourceType the Cosmos resource type.
 * @param consistencyLevel effective consistency level, may be null (client default is used).
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    // --- latency histogram (micros), keyed by the latency report payload ---
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    // Compute the sample once instead of duplicating it in both branches.
    long latencyMicros = cosmosDiagnostics.getDuration().toNanos() / 1000;
    ConcurrentDoubleHistogram latencyHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram == null) {
        // Success and failure latencies are tracked with different precisions.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    ClientTelemetry.recordValue(latencyHistogram, latencyMicros);

    // --- request-charge histogram (RUs), keyed by the charge report payload ---
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
        ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram == null) {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX,
            ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds the {@link ReportPayload} used as the telemetry-histogram key for one metric.
 * A null {@code consistencyLevel} falls back to the client's configured consistency.
 *
 * @return the populated payload key.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    ConsistencyLevel effectiveConsistency = consistencyLevel;
    if (effectiveConsistency == null) {
        effectiveConsistency = BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    }
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Serializes the pieces of {@code cosmosDiagnostics} (store responses, gateway statistics, retry
 * context, address resolution, serialization diagnostics, regions, system info, client config)
 * and attaches each as a JSON span event on the span held in {@code context}.
 *
 * @param cosmosDiagnostics diagnostics to attach; a null value is a no-op.
 * @param context call metadata identifying the span receiving the events.
 * @throws JsonProcessingException if any diagnostics fragment cannot be serialized.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;

    // One event per direct-mode store response; timed from the request timeline when available.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // Failed responses still carry a timeline on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = startTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    diagnosticsCounter = 1;
    // Supplemental store responses (capped list).
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = statistics.getStoreResult() == null
            ? null
            : DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
        OffsetDateTime requestStartTime = startTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Gateway-mode statistics, when present.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator =
            clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() == null
                ? null
                : clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
        OffsetDateTime requestStartTime = startTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }

    // Retry context, only when a retry actually started.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // Always-attached summary events, all timed at the request start.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Returns the start time of the "created" event in the given timeline, or {@code fallback} when
 * the iterator is null or contains no such event. Extracted from three duplicated scan loops.
 */
private static OffsetDateTime startTimeFromTimeline(Iterator<RequestTimeline.Event> eventIterator,
                                                    OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            // Null-safe comparison; the original called event.getName().equals(...).
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} | class TracerProvider {
// The plugged-in tracer; null when tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared Jackson mapper used to serialize diagnostics into span-event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// Database attribute keys/values recorded on every span by startSpan(...).
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context key/value marking a publisher as a nested call so no new span is started for it.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a publisher terminates with an error signal.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to the span only when an operation exceeds these durations.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
// A null tracer disables all tracing: isEnabled() returns false.
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
// True when a tracer implementation was plugged in.
public boolean isEnabled() {
return tracer != null;
}
/**
 * Starts a new tracing span for the given operation and records the standard Cosmos
 * attributes (database instance, database type, endpoint, statement) on it.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent
 * span is found the new span will be added as a child, otherwise the span will be created and
 * added to the context and any downstream start calls will use the created span as the parent.
 *
 * @param methodName name of the traced operation; also recorded as the db statement.
 * @param databaseId database the operation targets; skipped when null.
 * @param endpoint service endpoint recorded on the span.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object carrying the started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the span with the resource-provider namespace, then start it.
    Context spanContext = context.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
    spanContext = tracer.start(methodName, spanContext);
    if (databaseId != null) {
        tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(TracerProvider.DB_URL, endpoint, spanContext);
    tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 * <p>NOTE(review): this method dereferences {@code tracer} without an {@code isEnabled()} guard;
 * callers appear to be expected to check {@code isEnabled()} first — confirm before widening use.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code name} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span held in {@code context} based on the terminal {@link Signal}.
 * ON_COMPLETE ends the span with the supplied status code; ON_ERROR ends it with the signal's
 * throwable, preferring the status code carried by a {@link CosmosException}. All other signal
 * types are ignored (they carry no terminal state).
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 * @param statusCode the service status code of the completed operation.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service status code carried by the exception.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            break;
    }
}
/**
 * Wraps a {@link CosmosResponse} publisher with tracing only (no client telemetry) using the
 * default diagnostics threshold.
 *
 * @param resultPublisher the publisher emitting the response to trace.
 * @param context call metadata carrying any parent span / nested-call marker.
 * @param spanName name for the tracing span.
 * @param databaseId database the operation targets, may be null.
 * @param endpoint service endpoint recorded on the span.
 * @return the decorated publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // Method references replace the explicit lambdas of the original.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Wraps a transactional-batch publisher with tracing and, when enabled, client telemetry
 * reporting. Uses the default diagnostics threshold (no per-call override).
 *
 * @param resultPublisher the publisher emitting the batch response to trace.
 * @param context call metadata carrying any parent span / nested-call marker.
 * @param spanName name for the tracing span.
 * @param containerId container the operation targets.
 * @param databaseId database the operation targets.
 * @param client the async client the operation runs against.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType the Cosmos operation type recorded in telemetry.
 * @param resourceType the Cosmos resource type recorded in telemetry.
 * @return the decorated publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                    Context context,
                                                                    String spanName,
                                                                    String containerId,
                                                                    String databaseId,
                                                                    CosmosAsyncClient client,
                                                                    ConsistencyLevel consistencyLevel,
                                                                    OperationType operationType,
                                                                    ResourceType resourceType) {
    // Resolve the service endpoint once, then delegate.
    final String serviceEndpoint = BridgeInternal.getServiceEndpoint(client);
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        serviceEndpoint, client, consistencyLevel, operationType, resourceType,
        CosmosBatchResponse::getStatusCode, CosmosBatchResponse::getDiagnostics, null);
}
/**
 * Wraps an item-response publisher with tracing and, when client telemetry is enabled,
 * metric reporting. Delegates to the shared telemetry-aware wrapper.
 *
 * @param resultPublisher the publisher emitting the item response to trace.
 * @param context call metadata carrying any parent span / nested-call marker.
 * @param spanName name for the tracing span.
 * @param containerId container the operation targets.
 * @param databaseId database the operation targets.
 * @param client the async client the operation runs against.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType the Cosmos operation type recorded in telemetry.
 * @param resourceType the Cosmos resource type recorded in telemetry.
 * @param thresholdForDiagnosticsOnTracer per-call diagnostics threshold; null uses the default.
 * @return the decorated publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                               Context context,
                                                                               String spanName,
                                                                               String containerId,
                                                                               String databaseId,
                                                                               CosmosAsyncClient client,
                                                                               ConsistencyLevel consistencyLevel,
                                                                               OperationType operationType,
                                                                               ResourceType resourceType,
                                                                               Duration thresholdForDiagnosticsOnTracer) {
    // Resolve the service endpoint once, then delegate.
    final String serviceEndpoint = BridgeInternal.getServiceEndpoint(client);
    return publisherWithClientTelemetry(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        serviceEndpoint,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosItemResponse::getStatusCode,
        CosmosItemResponse::getDiagnostics,
        thresholdForDiagnosticsOnTracer);
}
/**
 * Decorates {@code resultPublisher} with span lifecycle handling: a span is started on subscribe,
 * diagnostics are attached as span events when the operation exceeded the threshold, and the span
 * is ended on success or error. Nested calls (marked via {@code COSMOS_CALL_DEPTH}) are not traced.
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata; checked for the nested-call marker.
 * @param spanName name for the tracing span.
 * @param databaseId database the operation targets, may be null.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from the emitted response.
 * @param diagnosticFunc extracts the diagnostics from the emitted response.
 * @param thresholdForDiagnosticsOnTracer threshold above which diagnostics are attached;
 *        null falls back to {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the decorated publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    // Fall back to the default CRUD threshold when no per-call value was given.
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: the original passed ex.getMessage() as a formatting argument with no
                    // "{}" placeholder, so SLF4J silently dropped it. Pass the exception itself to
                    // retain both the message and the stack trace.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Wraps {@code resultPublisher} with tracing (via {@code traceEnabledPublisher}) and, when client
 * telemetry is enabled, records latency/request-charge metrics for item and batch responses as
 * well as for failures surfaced as {@link CosmosException}.
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata carrying any parent span / nested-call marker.
 * @param spanName name for the tracing span.
 * @param containerId container the operation targets.
 * @param databaseId database the operation targets.
 * @param endpoint service endpoint recorded on the span.
 * @param client the async client; used for the telemetry switch and consistency defaulting.
 * @param consistencyLevel effective consistency level, may be null.
 * @param operationType the Cosmos operation type recorded in telemetry.
 * @param resourceType the Cosmos resource type recorded in telemetry.
 * @param statusCodeFunc extracts the status code from the emitted response.
 * @param diagnosticFunc extracts the diagnostics from the emitted response.
 * @param thresholdForDiagnosticsOnTracer per-call diagnostics threshold; null uses the default.
 * @return the decorated publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Cast to a non-generic type is checked; no @SuppressWarnings needed here.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown on the failure path, hence null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records latency and request-charge samples for one completed operation into the
 * client-telemetry histograms, lazily creating each histogram on first use of its payload key.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the samples.
 * @param cosmosDiagnostics diagnostics of the operation; its duration supplies the latency sample.
 * @param statusCode service status code; selects the latency histogram precision.
 * @param objectSize payload size in bytes, or null when unknown (failure path).
 * @param containerId container the operation targeted.
 * @param databaseId database the operation targeted.
 * @param operationType the Cosmos operation type.
 * @param resourceType the Cosmos resource type.
 * @param consistencyLevel effective consistency level, may be null (client default is used).
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    // --- latency histogram (micros), keyed by the latency report payload ---
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    // Compute the sample once instead of duplicating it in both branches.
    long latencyMicros = cosmosDiagnostics.getDuration().toNanos() / 1000;
    ConcurrentDoubleHistogram latencyHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram == null) {
        // Success and failure latencies are tracked with different precisions.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    ClientTelemetry.recordValue(latencyHistogram, latencyMicros);

    // --- request-charge histogram (RUs), keyed by the charge report payload ---
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
        ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram == null) {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX,
            ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds the {@link ReportPayload} used as the telemetry-histogram key for one metric.
 * A null {@code consistencyLevel} falls back to the client's configured consistency.
 *
 * @return the populated payload key.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    ConsistencyLevel effectiveConsistency = consistencyLevel;
    if (effectiveConsistency == null) {
        effectiveConsistency = BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    }
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Serializes the pieces of {@code cosmosDiagnostics} (store responses, gateway statistics, retry
 * context, address resolution, serialization diagnostics, regions, system info, client config)
 * and attaches each as a JSON span event on the span held in {@code context}.
 *
 * @param cosmosDiagnostics diagnostics to attach; a null value is a no-op.
 * @param context call metadata identifying the span receiving the events.
 * @throws JsonProcessingException if any diagnostics fragment cannot be serialized.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;

    // One event per direct-mode store response; timed from the request timeline when available.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // Failed responses still carry a timeline on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = startTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    diagnosticsCounter = 1;
    // Supplemental store responses (capped list).
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = statistics.getStoreResult() == null
            ? null
            : DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
        OffsetDateTime requestStartTime = startTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Gateway-mode statistics, when present.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator =
            clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() == null
                ? null
                : clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
        OffsetDateTime requestStartTime = startTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }

    // Retry context, only when a retry actually started.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // Always-attached summary events, all timed at the request start.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Returns the start time of the "created" event in the given timeline, or {@code fallback} when
 * the iterator is null or contains no such event. Extracted from three duplicated scan loops.
 */
private static OffsetDateTime startTimeFromTimeline(Iterator<RequestTimeline.Event> eventIterator,
                                                    OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            // Null-safe comparison; the original called event.getName().equals(...).
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} |
We talked through all these and decided 404 is a more regular business scenario than the others. We will wait and watch for the other errors on a customer-demand basis. | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | if (statusCode == HttpConstants.StatusCodes.NOTFOUND) { | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
// The plugged-in tracer; null when tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared Jackson mapper used to serialize diagnostics into span-event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// Database attribute keys/values recorded on every span by startSpan(...).
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Error attribute keys set when a span ends with a throwable.
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
// Context key/value marking a publisher as a nested call so no new span is started for it.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a publisher terminates with an error signal.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to the span only when an operation exceeds these durations.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
// A null tracer disables all tracing: isEnabled() returns false.
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
// True when a tracer implementation was plugged in.
public boolean isEnabled() {
return tracer != null;
}
/**
 * Starts a new tracing span for the given operation and records the standard Cosmos
 * attributes (database instance, database type, endpoint, statement) on it.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent
 * span is found the new span will be added as a child, otherwise the span will be created and
 * added to the context and any downstream start calls will use the created span as the parent.
 *
 * @param methodName name of the traced operation; also recorded as the db statement.
 * @param databaseId database the operation targets; skipped when null.
 * @param endpoint service endpoint recorded on the span.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object carrying the started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the span with the resource-provider namespace, then start it.
    Context spanContext = context.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
    spanContext = tracer.start(methodName, spanContext);
    if (databaseId != null) {
        tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(TracerProvider.DB_URL, endpoint, spanContext);
    tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span carried in {@code context}, deriving the final status from the
 * given reactive {@link Signal}.
 *
 * <p>Signals other than completion and error (e.g. ON_NEXT) are ignored.</p>
 *
 * @param context Additional metadata that is passed through the call stack; must not be null.
 * @param signal the terminal signal carrying status metadata for the span; must not be null.
 * @param statusCode the HTTP status code to record; replaced by the exception's own status
 * code when the error is a {@link CosmosException}.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    if (signal.isOnComplete()) {
        end(statusCode, null, context);
    } else if (signal.isOnError()) {
        Throwable error = null;
        if (signal.hasError()) {
            error = signal.getThrowable();
            if (error instanceof CosmosException) {
                // Prefer the status code carried by the service exception.
                statusCode = ((CosmosException) error).getStatusCode();
            }
        }
        end(statusCode, error, context);
    }
}
/**
 * Wraps a {@link CosmosResponse} publisher so that a tracing span covers its lifecycle.
 * No per-operation diagnostics threshold applies (the default CRUD threshold is used).
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata passed through the call stack.
 * @param spanName the span name for the traced operation.
 * @param databaseId the database id attribute; may be null.
 * @param endpoint the service endpoint attribute.
 * @return the trace-decorated publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // Method references replace the explicit lambdas of the original; behavior is identical.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Traces a transactional-batch publisher and, when client telemetry is enabled, records the
 * batch response's status code, diagnostics, payload length and request charge.
 *
 * @param resultPublisher the batch publisher to decorate.
 * @param context call metadata passed through the call stack.
 * @param spanName the span name for the traced operation.
 * @param containerId container id reported in telemetry.
 * @param databaseId database id reported on the span and in telemetry.
 * @param client the client whose endpoint and telemetry collector are used.
 * @param consistencyLevel per-request consistency; null falls back to the account level.
 * @param operationType operation type reported in telemetry.
 * @param resourceType resource type reported in telemetry.
 * @return the decorated publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
/**
 * Traces an item-operation publisher and, when client telemetry is enabled, records the item
 * response's status code, diagnostics, payload length and request charge.
 *
 * @param thresholdForDiagnosticsOnTracer operations slower than this get their diagnostics
 * attached to the span as events; null selects the default CRUD threshold.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing decorator: starts a span when the publisher is subscribed, attaches
 * diagnostics as span events for slow operations, and completes the span on success or error.
 *
 * <p>No span is started when tracing is disabled or when the context carries the
 * {@link #COSMOS_CALL_DEPTH} marker — nested publishers are traced by their outer call.</p>
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata; checked for the nested-call marker.
 * @param spanName the span name and {@code db.statement} value.
 * @param databaseId the database id attribute; may be null.
 * @param endpoint the service endpoint attribute.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer diagnostics are attached only when the operation
 * took longer than this; null selects {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the decorated publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    // Holds the context returned by startSpan so the success/error callbacks can end the span.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    // Only attach (potentially large) diagnostics for slow operations.
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: the original message had no "{}" placeholder, so the
                    // ex.getMessage() argument was silently dropped by SLF4J. Include the
                    // detail in the message and pass the exception for the stack trace.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage(), ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Decorates the publisher with tracing (via traceEnabledPublisher) and, when client telemetry
 * is enabled, records latency/request-charge telemetry for item and batch responses and for
 * failures surfaced as {@link CosmosException}.
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata passed through the call stack.
 * @param spanName the span name for the traced operation.
 * @param containerId container id reported in telemetry.
 * @param databaseId database id reported on the span and in telemetry.
 * @param endpoint the service endpoint attribute.
 * @param client the client whose telemetry collector is used.
 * @param consistencyLevel per-request consistency; null falls back to the account level.
 * @param operationType operation type reported in telemetry.
 * @param resourceType resource type reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer diagnostics threshold forwarded to tracing; may be null.
 * @return the decorated publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
// The telemetry-enabled flag is re-evaluated each time a signal fires, not captured once.
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
// Errors carry no payload, hence the null objectSize.
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
/**
 * Records one operation's latency (microseconds) and request charge into the client-telemetry
 * histograms, keyed by a ReportPayload describing the operation. A new histogram is created
 * lazily on the first report for a given payload.
 *
 * NOTE(review): the get()/put() pair on the operation-info map is a non-atomic
 * check-then-act; concurrent first reports for the same payload could each create a
 * histogram and one initial value may be lost — confirm whether this race is acceptable.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// Duration is recorded in microseconds (nanos / 1000).
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// Success and failure latencies use different precision settings.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} used as the grouping key for client-telemetry histograms:
 * one payload per (metric, operation, resource, status code, consistency, size bucket).
 *
 * @param objectSize payload size in bytes; null when no payload applies (e.g. errors).
 * @param consistencyLevel per-request consistency; null falls back to the client's account level.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's account-level consistency when none was set on the request.
    if (consistencyLevel == null) {
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    } else {
        payload.setConsistency(consistencyLevel);
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the pieces of {@link CosmosDiagnostics} (store responses, gateway statistics,
 * retry context, address resolution, serialization diagnostics, regions contacted, system
 * information and client configuration) to the current span as timestamped events whose
 * payload is the JSON-serialized statistic.
 *
 * @param cosmosDiagnostics diagnostics to attach; the method is a no-op when null.
 * @param context call metadata carrying the span the events are associated with.
 * @throws JsonProcessingException if a statistic cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() can throw for failed requests; the timeline is then on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = requestStartTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = statistics.getStoreResult() != null
            ? DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator()
            : null;
        OffsetDateTime requestStartTime = requestStartTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator =
            clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null
                ? clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator()
                : null;
        OffsetDateTime requestStartTime = requestStartTimeFromTimeline(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
/**
 * Returns the UTC start time of the "created" event in the given request timeline, or
 * {@code fallback} when the timeline is absent or contains no such event. Extracted to
 * replace four copy-pasted scan loops; uses a null-safe constant-first equals.
 */
private static OffsetDateTime requestStartTimeFromTimeline(Iterator<RequestTimeline.Event> eventIterator,
                                                           OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} | class TracerProvider {
// Tracer implementation plugged into the SDK; null when tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared, thread-safe JSON serializer used to attach diagnostics payloads to span events.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// OpenTracing-style span attribute keys/values recorded on every span.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context marker: when present, the publisher is a nested call and no extra span is started.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code recorded on spans that end with a non-Cosmos error.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Only operations slower than these thresholds get their diagnostics attached as span events.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a TracerProvider delegating to the given tracer.
 *
 * @param tracer the tracer implementation; may be null, in which case tracing is disabled.
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Returns whether tracing is enabled, i.e. a non-null tracer implementation was supplied.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param methodName the operation name; used as the span name and the {@code db.statement} attribute.
 * @param databaseId the database id recorded as {@code db.instance}; skipped when null.
 * @param endpoint the service endpoint recorded as {@code db.url}.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag the context with the Azure resource-provider namespace before starting the span.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
// NOTE(review): no isEnabled() guard — tracer is null when tracing is disabled, so callers
// must check isEnabled() before invoking this method.
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// NOTE(review): delegates without an isEnabled() guard — only call when tracing is enabled.
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Given a context containing the current tracing span the span is marked completed with status info from
 * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 * @param statusCode the HTTP status code to record; replaced by the exception's own status
 * code when the error signal carries a CosmosException.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
// On error, the span status prefers the service exception's own status code.
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
// Other signal types (e.g. ON_NEXT) do not terminate the span.
break;
}
}
/**
 * Wraps a {@link CosmosResponse} publisher so a tracing span covers its lifecycle; no
 * per-operation diagnostics threshold applies (the default CRUD threshold is used).
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
/**
 * Traces a transactional-batch publisher and, when client telemetry is enabled, records the
 * batch response's status code, diagnostics, payload length and request charge.
 *
 * @param consistencyLevel per-request consistency; null falls back to the account level.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
/**
 * Traces an item-operation publisher and, when client telemetry is enabled, records the item
 * response's status code, diagnostics, payload length and request charge.
 *
 * @param thresholdForDiagnosticsOnTracer operations slower than this get their diagnostics
 * attached to the span as events; null selects the default CRUD threshold.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing decorator: starts a span when the publisher is subscribed, attaches
 * diagnostics as span events for slow operations, and completes the span on success or error.
 *
 * <p>No span is started when tracing is disabled or when the context carries the
 * {@link #COSMOS_CALL_DEPTH} marker — nested publishers are traced by their outer call.</p>
 *
 * @param resultPublisher the publisher to decorate.
 * @param context call metadata; checked for the nested-call marker.
 * @param spanName the span name and {@code db.statement} value.
 * @param databaseId the database id attribute; may be null.
 * @param endpoint the service endpoint attribute.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer diagnostics are attached only when the operation
 * took longer than this; null selects {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the decorated publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    // Holds the context returned by startSpan so the success/error callbacks can end the span.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    // Only attach (potentially large) diagnostics for slow operations.
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: the original message had no "{}" placeholder, so the
                    // ex.getMessage() argument was silently dropped by SLF4J. Include the
                    // detail in the message and pass the exception for the stack trace.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage(), ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Decorates the publisher with tracing (via traceEnabledPublisher) and, when client telemetry
 * is enabled, records latency/request-charge telemetry for item and batch responses and for
 * failures surfaced as {@link CosmosException}.
 *
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer diagnostics threshold forwarded to tracing; may be null.
 * @return the decorated publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
// The telemetry-enabled flag is re-evaluated each time a signal fires, not captured once.
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
// Errors carry no payload, hence the null objectSize.
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
/**
 * Records one operation's latency (microseconds) and request charge into the client-telemetry
 * histograms, keyed by a ReportPayload describing the operation. A new histogram is created
 * lazily on the first report for a given payload.
 *
 * NOTE(review): the get()/put() pair on the operation-info map is a non-atomic
 * check-then-act; concurrent first reports for the same payload could each create a
 * histogram and one initial value may be lost — confirm whether this race is acceptable.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// Duration is recorded in microseconds (nanos / 1000).
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// Success and failure latencies use different precision settings.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} used as the grouping key for client-telemetry histograms:
 * one payload per (metric, operation, resource, status code, consistency, size bucket).
 *
 * @param objectSize payload size in bytes; null when no payload applies (e.g. errors).
 * @param consistencyLevel per-request consistency; null falls back to the client's account level.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
// Fall back to the client's account-level consistency when none was set on the request.
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
// Emits the pieces of a CosmosDiagnostics object as tracing span events on the span held in
// 'context': one event per store response, supplemental store response, gateway call, retry
// context, address resolution and serialization diagnostic, plus summary events for regions
// contacted, system information and client configuration. Each event payload is the JSON
// serialization of the corresponding diagnostics piece. No-op when diagnostics is null.
// Throws JsonProcessingException if any piece cannot be serialized.
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
// One "StoreResponseN" event per direct-mode store response.
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
// toResponse() rethrows failed store responses; the timeline is still on the exception.
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
// Default to the request/response time; prefer the "created" timeline event when present.
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
// Supplemental (capped) store responses get the same treatment under a different event name.
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Gateway-mode statistics, if this request went through the gateway.
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
// Retry context (only when at least one retry happened).
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
// Summary events, all anchored to the overall request start time.
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} |
Yea sure will add substatus check as well. | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | if (statusCode == HttpConstants.StatusCodes.NOTFOUND) { | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
// Creates a provider around the given tracer. A null tracer is allowed and disables
// tracing (see isEnabled()).
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
// Tracing is enabled exactly when a Tracer implementation was supplied at construction.
// Callers must check this before invoking methods that dereference the tracer (e.g. addEvent).
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
// NOTE(review): assumes tracing is enabled (tracer non-null); callers guard with isEnabled().
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag the span with the Azure resource-provider namespace for downstream attribution.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
// databaseId is null for account-level operations; only set db.instance when known.
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// NOTE(review): no null-check on tracer here — this throws NPE when tracing is disabled;
// callers are expected to guard with isEnabled() first.
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
/**
 * Ends the tracing span held in {@code context} according to the outcome carried by the
 * reactor {@link Signal}. A completion signal ends the span with the supplied status code;
 * an error signal ends it with the throwable, preferring the status code carried by a
 * {@link CosmosException}. All other signal types are ignored.
 *
 * @param context    call metadata holding the span to end; must not be null.
 * @param signal     the terminal signal describing the outcome; must not be null.
 * @param statusCode the status code to record when the signal carries no CosmosException.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // The service status code on the exception is more precise than the caller's.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            break;
    }
}
/**
 * Wraps the given publisher with span lifecycle management for {@link CosmosResponse}
 * results. Status code and diagnostics are read straight off the response; the default
 * CRUD diagnostics threshold is used (null threshold).
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // Method references replace the explicit lambdas; behavior is identical.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
// Wraps a batch-operation publisher with both tracing and client-telemetry reporting.
// Pure delegation to publisherWithClientTelemetry with batch-specific accessors and the
// default (null) diagnostics threshold.
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
// Wraps an item-operation publisher with both tracing and client-telemetry reporting.
// Pure delegation to publisherWithClientTelemetry with item-specific accessors and a
// caller-provided diagnostics threshold (null falls back to the CRUD default downstream).
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps {@code resultPublisher} with span lifecycle management: a span is started on
 * subscribe (unless tracing is disabled or this is a nested SDK call, detected via the
 * COSMOS_CALL_DEPTH context key), diagnostics are attached as span events when the
 * operation duration exceeds the threshold, and the span is ended on success or error.
 *
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics are recorded on
 *        the span; {@code null} falls back to {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: previously ex.getMessage() was passed as an SLF4J parameter to a
                    // message with no "{}" placeholder, so it was silently dropped. Passing the
                    // exception itself logs both the message and the stack trace.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry reporting on top of the traced publisher: on success, latency and
 * request-charge metrics are recorded for item and batch responses; on error, metrics are
 * recorded from the CosmosException. Telemetry is skipped entirely when disabled via config.
 *
 * Improvements over the previous version: the telemetry-enabled check is evaluated once per
 * signal instead of twice in doOnSuccess, and the unnecessary
 * {@code @SuppressWarnings("unchecked")} on the cast to the non-generic CosmosBatchResponse
 * was removed.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the config flag once per success signal instead of per instanceof branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Cast to a non-generic type is checked; no warning suppression needed.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Object size is unknown on the error path, hence null.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
// Records one operation's latency (in microseconds — toNanos()/1000 below) and request
// charge into the client-telemetry histograms keyed by ReportPayload, creating an
// auto-resizing histogram on first use of a given key.
// NOTE(review): the get-then-put on the operation-info map is not atomic — two threads
// recording the same key concurrently could each create a histogram and one recording
// could be lost; confirm whether computeIfAbsent is wanted here.
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// Success and failure latencies use different histogram precisions.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
// Builds the ReportPayload used as the key into the client-telemetry operation-info map:
// metric name/unit plus the dimensions of this operation (regions contacted, effective
// consistency, payload-size bucket, database, container, operation/resource type, status code).
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
// Fall back to the client's account-level consistency when no per-request level was given.
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
// objectSize is null for error paths; only bucket the payload size when it is known.
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
/**
 * Emits the contents of {@link CosmosDiagnostics} as tracing span events on the span held in
 * {@code context}: one event per store response, supplemental store response, gateway call,
 * retry context, address resolution and serialization diagnostic, plus summary events for the
 * regions contacted, system information and client configuration. Each event payload is the
 * JSON serialization of the corresponding diagnostics piece.
 *
 * The previously fourfold-duplicated "find the 'created' timeline event" loop is factored
 * into {@code timelineStartTime}, which is also null-safe against events without a name.
 *
 * @param cosmosDiagnostics diagnostics to report; the method is a no-op when {@code null}.
 * @param context call metadata holding the span the events are attached to.
 * @throws JsonProcessingException if any diagnostics piece cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    // One "StoreResponseN" event per direct-mode store response.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() rethrows failed store responses; the timeline is still on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = timelineStartTime(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    // Supplemental (capped) store responses get the same treatment under a different event name.
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
            ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = timelineStartTime(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    // Gateway-mode statistics, if this request went through the gateway.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = timelineStartTime(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    // Retry context (only when at least one retry happened).
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // Summary events, all anchored to the overall request start time.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Returns the start time of the "created" event in the given request timeline, or
 * {@code fallback} when the iterator is null or contains no "created" event.
 */
private static OffsetDateTime timelineStartTime(Iterator<RequestTimeline.Event> eventIterator,
                                                OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
/**
 * Ends the tracing span held in {@code context} according to the outcome carried by the
 * reactor {@link Signal}: completion ends the span with the supplied status code; an error
 * ends it with the throwable, preferring the status code from a {@link CosmosException}.
 * Other signal types are ignored.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service status code carried by the exception.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            break;
    }
}
/**
 * Wraps the given publisher with span lifecycle management for {@link CosmosResponse}
 * results, reading status code and diagnostics straight off the response and using the
 * default CRUD diagnostics threshold (null).
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // Method references replace the explicit lambdas; behavior is identical.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps {@code resultPublisher} with span lifecycle management: a span is started on
 * subscribe (unless tracing is disabled or this is a nested SDK call, detected via the
 * COSMOS_CALL_DEPTH context key), diagnostics are attached as span events when the
 * operation duration exceeds the threshold, and the span is ended on success or error.
 *
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics are recorded on
 *        the span; {@code null} falls back to {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: previously ex.getMessage() was passed as an SLF4J parameter to a
                    // message with no "{}" placeholder, so it was silently dropped. Passing the
                    // exception itself logs both the message and the stack trace.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Layers client telemetry recording on top of the tracing-instrumented publisher:
 * successful item/batch responses and {@link CosmosException} failures are folded
 * into the latency and request-charge histograms when client telemetry is enabled.
 *
 * @param resultPublisher the operation to instrument.
 * @param context call metadata carrying any parent span / nested-call marker.
 * @param spanName name of the tracing span.
 * @param containerId container id reported to telemetry.
 * @param databaseId database id reported to telemetry and the span.
 * @param endpoint service endpoint recorded on the span.
 * @param client client whose telemetry configuration and consistency apply.
 * @param consistencyLevel consistency level reported; may be {@code null}.
 * @param operationType operation type reported to telemetry.
 * @param resourceType resource type reported to telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the enablement check once per signal instead of once per branch.
            boolean telemetryEnabled =
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (telemetryEnabled && response instanceof CosmosBatchResponse) {
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            boolean telemetryEnabled =
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown for failures, hence the null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records one operation's latency (microseconds) and request charge into the client
 * telemetry histograms, lazily creating a histogram for a payload key on first use.
 * Latency histograms for success status codes use a finer precision than failures.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the data points.
 * @param cosmosDiagnostics diagnostics supplying the operation duration and regions.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes; {@code null} when unknown (failures).
 * @param containerId container id used in the telemetry key.
 * @param databaseId database id used in the telemetry key.
 * @param operationType operation type used in the telemetry key.
 * @param resourceType resource type used in the telemetry key.
 * @param consistencyLevel consistency level; {@code null} falls back to the client default.
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry clientTelemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    // Latency histogram, keyed by the full operation descriptor.
    ReportPayload latencyPayload = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel,
        ClientTelemetry.REQUEST_LATENCY_NAME, ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram =
        clientTelemetry.getClientTelemetryInfo().getOperationInfoMap().get(latencyPayload);
    if (latencyHistogram == null) {
        boolean isSuccess = statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE;
        latencyHistogram = isSuccess
            ? new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION)
            : new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        latencyHistogram.setAutoResize(true);
        clientTelemetry.getClientTelemetryInfo().getOperationInfoMap().put(latencyPayload, latencyHistogram);
    }
    // Duration is nanoseconds; histogram unit is microseconds.
    ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos() / 1000);

    // Request-charge histogram, same key shape with the charge metric name/unit.
    ReportPayload chargePayload = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel,
        ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram chargeHistogram =
        clientTelemetry.getClientTelemetryInfo().getOperationInfoMap().get(chargePayload);
    if (chargeHistogram == null) {
        chargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        chargeHistogram.setAutoResize(true);
        clientTelemetry.getClientTelemetryInfo().getOperationInfoMap().put(chargePayload, chargeHistogram);
    }
    ClientTelemetry.recordValue(chargeHistogram, requestCharge);
}
/**
 * Builds the {@link ReportPayload} key/descriptor used to bucket telemetry data points.
 * When {@code consistencyLevel} is {@code null} the client's account-level default is used.
 *
 * @param cosmosAsyncClient client supplying the default consistency level.
 * @param cosmosDiagnostics diagnostics supplying the contacted regions.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes; {@code null} when unknown.
 * @param containerId container name for the payload.
 * @param databaseId database name for the payload.
 * @param operationType operation type for the payload.
 * @param resourceType resource type for the payload.
 * @param consistencyLevel requested consistency; may be {@code null}.
 * @param metricsName metric name (latency or request charge).
 * @param unitName metric unit.
 * @return a fully populated payload descriptor.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    ConsistencyLevel effectiveConsistency = consistencyLevel != null
        ? consistencyLevel
        : BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of {@code cosmosDiagnostics} to the current span as timestamped
 * events: store responses, supplemental store responses, gateway statistics, retry
 * context, address resolution, serialization diagnostics, regions contacted, system
 * information and client configuration. No-op when diagnostics are {@code null}.
 *
 * @param cosmosDiagnostics diagnostics to serialize onto the span; may be {@code null}.
 * @param context call metadata identifying the span receiving the events.
 * @throws JsonProcessingException when a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);

    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() rethrows failed results; the timeline on the exception is still usable.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = requestStartTimeFromEvents(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++,
            jsonAttributes(storeResponseStatistics), requestStartTime, context);
    }

    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
            ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = requestStartTimeFromEvents(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++,
            jsonAttributes(statistics), requestStartTime, context);
    }

    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = requestStartTimeFromEvents(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics",
            jsonAttributes(clientSideRequestStatistics.getGatewayStatistics()), requestStartTime, context);
    }

    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        this.addEvent("Retry Context", jsonAttributes(clientSideRequestStatistics.getRetryContext()),
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++,
            jsonAttributes(addressResolutionStatistics),
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType,
                jsonAttributes(serializationDiagnostics),
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // The remaining sections all share the overall request start time as their timestamp.
    OffsetDateTime overallStartTime =
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
    this.addEvent("RegionContacted",
        jsonAttributes(clientSideRequestStatistics.getRegionsContacted()), overallStartTime, context);
    this.addEvent("SystemInformation",
        jsonAttributes(ClientSideRequestStatistics.fetchSystemInformation()), overallStartTime, context);
    this.addEvent("ClientCfgs",
        jsonAttributes(clientSideRequestStatistics.getDiagnosticsClientContext()), overallStartTime, context);
}

/**
 * Returns the start time of the "created" event in the given request timeline, or
 * {@code fallback} when the iterator is {@code null} or no such event is present.
 * Uses a null-safe name comparison (event names may be absent).
 */
private static OffsetDateTime requestStartTimeFromEvents(Iterator<RequestTimeline.Event> eventIterator,
                                                         OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}

/**
 * Serializes {@code value} to JSON and wraps it in the single-entry attribute map
 * expected by {@link #addEvent}.
 *
 * @throws JsonProcessingException when serialization fails.
 */
private static Map<String, Object> jsonAttributes(Object value) throws JsonProcessingException {
    Map<String, Object> attributes = new HashMap<>();
    attributes.put(JSON_STRING, mapper.writeValueAsString(value));
    return attributes;
}
} |
BTW, why are we adding those? - `tracer.end` will do `span.recordException(t)` https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core-tracing-opentelemetry/src/main/java/com/azure/core/tracing/opentelemetry/implementation/HttpTraceUtil.java#L53 - which internally record common exception attributes https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk/trace/src/main/java/io/opentelemetry/sdk/trace/RecordEventsReadableSpan.java#L406-L409 So we can remove all ERROR_TYPE/ERROR_MSG attributes logic? | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
// Underlying azure-core tracer; null when tracing is disabled (see isEnabled()).
// Assigned exactly once in the constructor, so it can (and should) be final.
private final Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared, thread-safe serializer for diagnostics JSON payloads attached to span events.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics are attached to span events.
private static final String JSON_STRING = "JSON";
// OpenTelemetry-style database attribute keys/values recorded on each span.
public static final String DB_TYPE_VALUE = "Cosmos";
public static final String DB_TYPE = "db.type";
public static final String DB_INSTANCE = "db.instance";
public static final String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context key/value used to suppress span creation for nested SDK calls.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Placeholder status code used when a publisher errors before any response exists.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to spans only when the operation runs longer than these thresholds.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider that delegates to the given azure-core tracer.
 *
 * @param tracer the tracer implementation; may be null, in which case tracing is
 * disabled and {@code isEnabled()} returns false. Do not add a null check here.
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Indicates whether distributed tracing is active for this provider.
 *
 * @return true when a tracer implementation was supplied at construction time.
 */
public boolean isEnabled() {
    return this.tracer != null;
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param methodName name of the API operation; used as the span name and the db.statement attribute.
 * @param databaseId database the operation targets; the db.instance attribute is skipped when null.
 * @param endpoint service endpoint recorded as the db.url attribute.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag with the Azure resource-provider namespace so azure-core tracing classifies the span.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
// NOTE(review): assumes tracer is non-null — callers are expected to check isEnabled() first.
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// NOTE(review): no null guard on tracer — callers must only invoke this when isEnabled() is true.
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Given a context containing the current tracing span, marks the span completed with the
 * status information carried by the terminal {@link Signal}.
 *
 * @param context Additional metadata that is passed through the call stack; must not be null.
 * @param signal the terminal signal (complete or error); must not be null.
 * @param statusCode status code to record; replaced by the CosmosException status code when
 * the error signal carries one.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
    Signal<T> signal,
    int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service-reported status code over the caller-supplied one.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        default:
            // Non-terminal signal types carry nothing to record.
            break;
    }
}
/**
 * Instruments a CosmosResponse-producing publisher with span start/end handling.
 * No client telemetry is collected on this path.
 *
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a batch-operation publisher with tracing and, when enabled, client telemetry.
 * Delegates to publisherWithClientTelemetry using batch-specific accessors and the default
 * (CRUD) diagnostics threshold (null).
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
/**
 * Instruments an item-operation publisher with tracing and, when enabled, client telemetry.
 * Delegates to publisherWithClientTelemetry using item-response accessors.
 *
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are attached
 * to the span; null means the default CRUD threshold.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Decorates {@code resultPublisher} so a tracing span is started on subscription and ended on
 * success or error. Nested SDK calls (flagged through COSMOS_CALL_DEPTH in the context) are not
 * traced again so only the outermost operation produces a span.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; inspected for the nested-call marker.
 * @param spanName name of the span to create.
 * @param databaseId database the operation targets; may be null.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics events are
 * attached to the span; defaults to CRUD_THRESHOLD_FOR_DIAGNOSTICS when null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String endpoint,
    Function<T, Integer> statusCodeFunc,
    Function<T, CosmosDiagnostics> diagnosticFunc,
    Duration thresholdForDiagnosticsOnTracer) {
    // The span-bearing context is produced inside doOnSubscribe; hold it for the terminal hooks.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    // Attach full diagnostics only for slow operations to keep spans lightweight.
                    Duration threshold = thresholdForDiagnosticsOnTracer != null
                        ? thresholdForDiagnosticsOnTracer
                        : CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: the previous call passed ex.getMessage() as a format argument with
                    // no '{}' placeholder, so SLF4J silently dropped it. Pass the throwable so
                    // the message and stack trace are actually logged.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Combines tracing instrumentation (via traceEnabledPublisher) with client-telemetry
 * collection: on success or CosmosException failure, latency and request-charge samples are
 * recorded when the client-telemetry switch is on.
 *
 * @param resultPublisher the publisher to instrument.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics to spans;
 * null means the default CRUD threshold.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String containerId,
    String databaseId,
    String endpoint,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    Function<T, Integer> statusCodeFunc,
    Function<T, CosmosDiagnostics> diagnosticFunc,
    Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per branch.
            boolean telemetryEnabled =
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (telemetryEnabled && response instanceof CosmosBatchResponse) {
                // Cast to a non-generic type is fully checked; no warning suppression needed.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload size is unknown on failure, hence the null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records operation latency and request charge into the client-telemetry histograms keyed by
 * the operation's ReportPayload. Histograms are created lazily on first use.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the samples.
 * @param cosmosDiagnostics diagnostics providing the operation duration.
 * @param statusCode response status code; selects the latency histogram precision.
 * @param objectSize payload size in bytes, or null when unknown (e.g. failures).
 * @param containerId container targeted by the operation.
 * @param databaseId database targeted by the operation.
 * @param operationType type of operation performed.
 * @param resourceType type of resource addressed.
 * @param consistencyLevel requested consistency, or null to use the client default.
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
    CosmosDiagnostics cosmosDiagnostics,
    int statusCode,
    Integer objectSize,
    String containerId,
    String databaseId,
    OperationType operationType,
    ResourceType resourceType,
    ConsistencyLevel consistencyLevel,
    float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    // computeIfAbsent removes the check-then-act race of get()+put() on the shared map and the
    // duplicated create/record branches of the previous implementation.
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap()
        .computeIfAbsent(reportPayloadLatency, payload -> {
            // Successful operations are tracked with higher precision than failures.
            ConcurrentDoubleHistogram histogram =
                statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
                    && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE
                    ? new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                        ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION)
                    : new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                        ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            histogram.setAutoResize(true);
            return histogram;
        });
    // Latency is reported in microseconds.
    ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos() / 1000);
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
        ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap()
        .computeIfAbsent(reportPayloadRequestCharge, payload -> {
            ConcurrentDoubleHistogram histogram = new ConcurrentDoubleHistogram(
                ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
            histogram.setAutoResize(true);
            return histogram;
        });
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds the ReportPayload key under which client-telemetry samples for this operation are
 * aggregated.
 *
 * @param consistencyLevel requested consistency; when null the client's default is used.
 * @param objectSize payload size in bytes; the size flag is left unset when null.
 * @return a fully populated payload key.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
    CosmosDiagnostics cosmosDiagnostics,
    int statusCode,
    Integer objectSize,
    String containerId,
    String databaseId,
    OperationType operationType,
    ResourceType resourceType,
    ConsistencyLevel consistencyLevel,
    String metricsName,
    String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    ConsistencyLevel effectiveConsistency = consistencyLevel != null
        ? consistencyLevel
        : BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of {@code cosmosDiagnostics} (store responses, gateway statistics,
 * retry context, address resolution, serialization diagnostics, regions, system info, client
 * config) to the current span as individual events. Each event is timestamped with the
 * "created" entry of the request timeline when available, otherwise with the section's own
 * start/response time.
 *
 * @param cosmosDiagnostics diagnostics to publish; the method is a no-op when null.
 * @param context call metadata holding the target span.
 * @throws JsonProcessingException if a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() rethrows the stored failure; its timeline is still usable.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = requestCreatedTime(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = statistics.getStoreResult() != null
            ? DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator()
            : null;
        OffsetDateTime requestStartTime = requestCreatedTime(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator =
            clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null
                ? clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator()
                : null;
        OffsetDateTime requestStartTime = requestCreatedTime(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // The remaining sections share the overall request start time as their event timestamp.
    OffsetDateTime overallStartTime =
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes, overallStartTime, context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes, overallStartTime, context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes, overallStartTime, context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time; returns
 * {@code fallback} when the iterator is null or no such event exists. Extracted because this
 * scan was duplicated for store responses, supplemental responses and gateway statistics.
 */
private static OffsetDateTime requestCreatedTime(Iterator<RequestTimeline.Event> eventIterator,
                                                 OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            // Null-safe comparison: a timeline event with no name cannot match.
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} |
I believe 404 statusCode is enough and the core tracer will populate all the info needed. https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core-tracing-opentelemetry/src/main/java/com/azure/core/tracing/opentelemetry/implementation/HttpTraceUtil.java#L179 Can we avoid populating custom error codes? | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
// 404 is an expected, data-dependent outcome (e.g. point-read of a missing document):
// record the error attributes but end the span without the throwable so it is not
// reported as a failure.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
// Genuine failure: record message/type and let the tracer mark the span errored.
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
// Successful completion.
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
// A plain 404 (sub-status 0) is an expected data-dependent outcome: end the span
// without the throwable and let the core tracer derive status from the code alone,
// avoiding custom error attributes.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
// All other outcomes: the core tracer maps statusCode/throwable to the span status itself.
tracer.end(statusCode, throwable, context);
}
// Underlying azure-core tracer; null when tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared, thread-safe serializer for diagnostics JSON payloads attached to span events.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics are attached to span events.
private final static String JSON_STRING = "JSON";
// OpenTelemetry-style database attribute keys/values recorded on each span.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Span attribute keys used for error reporting in end().
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
// Context key/value used to suppress span creation for nested SDK calls.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Placeholder status code used when a publisher errors before any response exists.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to spans only when the operation runs longer than these thresholds.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider that delegates to the given azure-core tracer.
 *
 * @param tracer the tracer implementation; may be null, in which case tracing is
 * disabled and {@code isEnabled()} returns false. Do not add a null check here.
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Indicates whether distributed tracing is active for this provider.
 *
 * @return true when a tracer implementation was supplied at construction time.
 */
public boolean isEnabled() {
    return this.tracer != null;
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param methodName name of the API operation; used as the span name and the db.statement attribute.
 * @param databaseId database the operation targets; the db.instance attribute is skipped when null.
 * @param endpoint service endpoint recorded as the db.url attribute.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag with the Azure resource-provider namespace so azure-core tracing classifies the span.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
// NOTE(review): assumes tracer is non-null — callers are expected to check isEnabled() first.
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// NOTE(review): no null guard on tracer — callers must only invoke this when isEnabled() is true.
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Given a context containing the current tracing span, marks the span completed with the
 * status information carried by the terminal {@link Signal}.
 *
 * @param context Additional metadata that is passed through the call stack; must not be null.
 * @param signal the terminal signal (complete or error); must not be null.
 * @param statusCode status code to record; replaced by the CosmosException status code when
 * the error signal carries one.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
    Signal<T> signal,
    int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service-reported status code over the caller-supplied one.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        default:
            // Non-terminal signal types carry nothing to record.
            break;
    }
}
/**
 * Instruments a CosmosResponse-producing publisher with span start/end handling.
 * No client telemetry is collected on this path.
 *
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a batch-operation publisher with tracing and, when enabled, client telemetry.
 * Delegates to publisherWithClientTelemetry using batch-specific accessors and the default
 * (CRUD) diagnostics threshold (null).
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
/**
 * Instruments an item-operation publisher with tracing and, when enabled, client telemetry.
 * Delegates to publisherWithClientTelemetry using item-response accessors.
 *
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are attached
 * to the span; null means the default CRUD threshold.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Decorates {@code resultPublisher} so a tracing span is started on subscription and ended on
 * success or error. Nested SDK calls (flagged through COSMOS_CALL_DEPTH in the context) are not
 * traced again so only the outermost operation produces a span.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; inspected for the nested-call marker.
 * @param spanName name of the span to create.
 * @param databaseId database the operation targets; may be null.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics events are
 * attached to the span; defaults to CRUD_THRESHOLD_FOR_DIAGNOSTICS when null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String endpoint,
    Function<T, Integer> statusCodeFunc,
    Function<T, CosmosDiagnostics> diagnosticFunc,
    Duration thresholdForDiagnosticsOnTracer) {
    // The span-bearing context is produced inside doOnSubscribe; hold it for the terminal hooks.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    // Attach full diagnostics only for slow operations to keep spans lightweight.
                    Duration threshold = thresholdForDiagnosticsOnTracer != null
                        ? thresholdForDiagnosticsOnTracer
                        : CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUGFIX: the previous call passed ex.getMessage() as a format argument with
                    // no '{}' placeholder, so SLF4J silently dropped it. Pass the throwable so
                    // the message and stack trace are actually logged.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Combines tracing instrumentation (via traceEnabledPublisher) with client-telemetry
 * collection: on success or CosmosException failure, latency and request-charge samples are
 * recorded when the client-telemetry switch is on.
 *
 * @param resultPublisher the publisher to instrument.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics to spans;
 * null means the default CRUD threshold.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String containerId,
    String databaseId,
    String endpoint,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    Function<T, Integer> statusCodeFunc,
    Function<T, CosmosDiagnostics> diagnosticFunc,
    Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per branch.
            boolean telemetryEnabled =
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (telemetryEnabled && response instanceof CosmosBatchResponse) {
                // Cast to a non-generic type is fully checked; no warning suppression needed.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload size is unknown on failure, hence the null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records operation latency and request charge into the client-telemetry histograms keyed by
 * the operation's ReportPayload. Histograms are created lazily on first use.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the samples.
 * @param cosmosDiagnostics diagnostics providing the operation duration.
 * @param statusCode response status code; selects the latency histogram precision.
 * @param objectSize payload size in bytes, or null when unknown (e.g. failures).
 * @param containerId container targeted by the operation.
 * @param databaseId database targeted by the operation.
 * @param operationType type of operation performed.
 * @param resourceType type of resource addressed.
 * @param consistencyLevel requested consistency, or null to use the client default.
 * @param requestCharge RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
    CosmosDiagnostics cosmosDiagnostics,
    int statusCode,
    Integer objectSize,
    String containerId,
    String databaseId,
    OperationType operationType,
    ResourceType resourceType,
    ConsistencyLevel consistencyLevel,
    float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    // computeIfAbsent removes the check-then-act race of get()+put() on the shared map and the
    // duplicated create/record branches of the previous implementation.
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap()
        .computeIfAbsent(reportPayloadLatency, payload -> {
            // Successful operations are tracked with higher precision than failures.
            ConcurrentDoubleHistogram histogram =
                statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
                    && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE
                    ? new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                        ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION)
                    : new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                        ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            histogram.setAutoResize(true);
            return histogram;
        });
    // Latency is reported in microseconds.
    ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos() / 1000);
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId,
        operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
        ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap()
        .computeIfAbsent(reportPayloadRequestCharge, payload -> {
            ConcurrentDoubleHistogram histogram = new ConcurrentDoubleHistogram(
                ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
            histogram.setAutoResize(true);
            return histogram;
        });
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds the ReportPayload key under which client-telemetry samples for this operation are
 * aggregated.
 *
 * @param consistencyLevel requested consistency; when null the client's default is used.
 * @param objectSize payload size in bytes; the size flag is left unset when null.
 * @return a fully populated payload key.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
    CosmosDiagnostics cosmosDiagnostics,
    int statusCode,
    Integer objectSize,
    String containerId,
    String databaseId,
    OperationType operationType,
    ResourceType resourceType,
    ConsistencyLevel consistencyLevel,
    String metricsName,
    String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    ConsistencyLevel effectiveConsistency = consistencyLevel != null
        ? consistencyLevel
        : BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} |
Done, added 404/0 check | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | if (statusCode == HttpConstants.StatusCodes.NOTFOUND) { | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
/**
 * Adds client-telemetry collection on top of {@link #traceEnabledPublisher}: on success, latency
 * and request-charge histograms are updated for item and batch responses; on error, the same data
 * is recorded from the {@link CosmosException}.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata for span parenting / nested-call detection.
 * @param spanName the operation name for the span.
 * @param containerId the target container id reported to telemetry.
 * @param databaseId the target database id.
 * @param endpoint the service endpoint recorded on the span.
 * @param client the client whose telemetry settings are consulted.
 * @param consistencyLevel the requested consistency; may be {@code null}.
 * @param operationType the operation kind reported to telemetry.
 * @param resourceType the resource kind reported to telemetry.
 * @param statusCodeFunc extracts the status code from a response.
 * @param diagnosticFunc extracts diagnostics from a response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics to the span.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry toggle once per signal instead of once per branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Plain cast: CosmosBatchResponse is non-generic, so no unchecked warning arises
                // and the original @SuppressWarnings("unchecked") was unnecessary.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown on failure, hence the null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records latency and request-charge measurements for a completed operation into the client
 * telemetry histograms, creating each histogram on first use for its report-payload key.
 *
 * @param cosmosAsyncClient the client whose telemetry store is updated.
 * @param cosmosDiagnostics diagnostics supplying the operation duration and regions contacted.
 * @param statusCode the HTTP status code of the operation.
 * @param objectSize the payload size in bytes; {@code null} when unknown (e.g. on failure).
 * @param containerId the target container id.
 * @param databaseId the target database id.
 * @param operationType the operation kind.
 * @param resourceType the resource kind.
 * @param consistencyLevel the requested consistency; may be {@code null}.
 * @param requestCharge the RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    // Latency histogram, keyed by the full report payload (operation/resource/status/...).
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram != null) {
        // Duration is recorded in microseconds (toNanos()/1000).
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
    } else {
        // Success and failure use different histogram precisions.
        // NOTE(review): this get-then-put sequence is not atomic; concurrent first recordings for
        // the same payload could each create a histogram and one sample may be lost — confirm the
        // map type and whether computeIfAbsent should be used instead.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    // Request-charge histogram, same keying scheme and same lazy-creation pattern.
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram != null) {
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
    } else {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
}
/**
 * Builds the {@link ReportPayload} used as the lookup key for client-telemetry histograms for a
 * given operation/metric combination.
 *
 * @param cosmosAsyncClient used to resolve the client's default consistency level.
 * @param cosmosDiagnostics supplies the regions contacted.
 * @param statusCode the HTTP status code of the operation.
 * @param objectSize payload size in bytes; the greater-than-1KB flag is only set when non-null.
 * @param containerId the target container id.
 * @param databaseId the target database id.
 * @param operationType the operation kind.
 * @param resourceType the resource kind.
 * @param consistencyLevel the requested consistency; falls back to the client default when null.
 * @param metricsName the metric this payload keys (latency or request charge).
 * @param unitName the unit of the metric.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's default consistency when none was supplied for the operation.
    ConsistencyLevel effectiveConsistency = consistencyLevel;
    if (effectiveConsistency == null) {
        effectiveConsistency = BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    }
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of {@link CosmosDiagnostics} to the current tracing span as timestamped
 * events — one event per diagnostics section — each carrying that section serialized as JSON
 * under the {@code JSON} attribute.
 * <p>
 * Where a request timeline is available, the timestamp of its "created" entry is used as the
 * event time; otherwise the request/response (or request start) time is used.
 *
 * @param cosmosDiagnostics the diagnostics to record; the method is a no-op when {@code null}.
 * @param context the tracing context identifying the span to attach events to.
 * @throws JsonProcessingException if a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes = null;
    int diagnosticsCounter = 1;
    // One "StoreResponseN" event per store response.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() can throw CosmosException; the timeline is then taken from the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
            , ZoneOffset.UTC);
        if (eventIterator != null) {
            // Prefer the "created" timeline entry as the event timestamp when present.
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    // Supplemental store responses, capped via getCappedSupplementalResponseStatisticsList.
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
            ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            Iterator<RequestTimeline.Event> eventIterator =
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    // Gateway statistics, when present.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            Iterator<RequestTimeline.Event> eventIterator =
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    // Retry information, only when at least one retry was started.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    // One event per address-resolution call.
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    // Serialization timings, one event per serialization step, when collected.
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // Always-attached summary events, all stamped with the request start time.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
// Underlying Azure Core tracer; may be null, in which case tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared JSON serializer for diagnostics payloads attached to tracing events.
private static final ObjectMapper mapper = new ObjectMapper();
// Event-attribute key under which serialized diagnostics JSON is stored.
private final static String JSON_STRING = "JSON";
// Standard span attribute names/values for database operations.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context key/value marking nested SDK calls so only the outermost call creates a span.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a publisher terminates with an error before a response is available.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to the span only when an operation exceeds these latency thresholds.
// NOTE(review): these are public instance (non-static) constants — presumably intentional; confirm.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a TracerProvider backed by the given tracer.
 *
 * @param tracer the Azure Core tracer to delegate to; {@code null} disables tracing.
 */
public TracerProvider(Tracer tracer) {
    this.tracer = tracer;
}
/**
 * @return whether a tracer is configured and spans/events will be produced.
 */
public boolean isEnabled() {
    return tracer != null;
}
/**
 * Starts a new tracing span for the given operation and decorates it with Cosmos attributes.
 * <p>
 * If {@code context} already carries information about a parent span, the new span is created as
 * its child; otherwise a fresh span is started. The returned context carries the new span and
 * must be used for all subsequent events and the final end-span call.
 *
 * @param methodName the operation name, used both as the span name and the db.statement attribute.
 * @param databaseId the database the operation targets; the attribute is skipped when {@code null}.
 * @param endpoint the service endpoint recorded as db.url.
 * @param context additional call metadata; must not be {@code null}.
 * @return the context enriched with the newly started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Context spanContext = Objects.requireNonNull(context, "'context' cannot be null.");
    spanContext = spanContext.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
    spanContext = tracer.start(methodName, spanContext);
    // Only the database attribute is conditional; everything else is always recorded.
    if (databaseId != null) {
        tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(TracerProvider.DB_URL, endpoint, spanContext);
    tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline.</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
    // Note: callers must check isEnabled() first — tracer is null when tracing is disabled.
    tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span carried by {@code context}, deriving the outcome from the given
 * reactive {@link Signal}: completion ends the span normally, an error signal records the
 * throwable (and, for a {@link CosmosException}, its own status code).
 *
 * @param context call metadata holding the span to end; must not be {@code null}.
 * @param signal the terminal signal of the operation; must not be {@code null}.
 * @param statusCode the status code to report; overridden by a CosmosException's status on error.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_ERROR:
            // Prefer the status code carried by a CosmosException over the caller-supplied one.
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        default:
            // Non-terminal signal types do not end the span.
            break;
    }
}
/**
 * Instruments a {@link CosmosResponse} publisher with tracing only (no client telemetry), using
 * the default CRUD diagnostics threshold.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata for span parenting / nested-call detection.
 * @param spanName the operation name for the span.
 * @param databaseId the target database id; may be {@code null}.
 * @param endpoint the service endpoint recorded on the span.
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // Method references replace the original explicit lambdas; null threshold selects the default.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a batch-response publisher with tracing and client-telemetry reporting, using the
 * default diagnostics threshold.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata for span parenting / nested-call detection.
 * @param spanName the operation name for the span.
 * @param containerId the target container id reported to telemetry.
 * @param databaseId the target database id.
 * @param client the client whose service endpoint and telemetry settings are used.
 * @param consistencyLevel the requested consistency; may be {@code null}.
 * @param operationType the operation kind reported to telemetry.
 * @param resourceType the resource kind reported to telemetry.
 * @return the instrumented publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                    Context context,
                                                                    String spanName,
                                                                    String containerId,
                                                                    String databaseId,
                                                                    CosmosAsyncClient client,
                                                                    ConsistencyLevel consistencyLevel,
                                                                    OperationType operationType,
                                                                    ResourceType resourceType) {
    String serviceEndpoint = BridgeInternal.getServiceEndpoint(client);
    // Null threshold selects the default CRUD diagnostics threshold.
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        serviceEndpoint,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosBatchResponse::getStatusCode,
        CosmosBatchResponse::getDiagnostics,
        null);
}
/**
 * Instruments an item-response publisher with tracing and client-telemetry reporting.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata used for span parenting and nested-call detection.
 * @param spanName the operation name used for the tracing span.
 * @param containerId the target container id reported to telemetry.
 * @param databaseId the target database id.
 * @param client the client whose service endpoint and telemetry settings are used.
 * @param consistencyLevel the requested consistency; may be {@code null}.
 * @param operationType the operation kind reported to telemetry.
 * @param resourceType the resource kind reported to telemetry.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are attached to the span.
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                               Context context,
                                                                               String spanName,
                                                                               String containerId,
                                                                               String databaseId,
                                                                               CosmosAsyncClient client,
                                                                               ConsistencyLevel consistencyLevel,
                                                                               OperationType operationType,
                                                                               ResourceType resourceType,
                                                                               Duration thresholdForDiagnosticsOnTracer) {
    String serviceEndpoint = BridgeInternal.getServiceEndpoint(client);
    return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
        serviceEndpoint,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        CosmosItemResponse::getStatusCode,
        CosmosItemResponse::getDiagnostics,
        thresholdForDiagnosticsOnTracer);
}
/**
 * Decorates {@code resultPublisher} with span lifecycle management: a span is started on
 * subscription, diagnostics are attached as events when the operation's duration exceeds the
 * configured threshold, and the span is ended on success or error.
 * <p>
 * Nested SDK calls (marked via {@link #COSMOS_CALL_DEPTH} in the context) are not traced again;
 * only the outermost call owns the span.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; also used to detect nested calls.
 * @param spanName the operation name for the span.
 * @param databaseId the target database id; may be {@code null}.
 * @param endpoint the service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a response.
 * @param diagnosticFunc extracts diagnostics from a response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are attached;
 *        defaults to {@link #CRUD_THRESHOLD_FOR_DIAGNOSTICS} when {@code null}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    // The span-bearing context is produced inside doOnSubscribe, so it is handed to the
    // success/error callbacks through a mutable holder.
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    // Only attach (potentially large) diagnostics for slow operations.
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUG FIX: the original passed ex.getMessage() as an argument to a format
                    // string with no '{}' placeholder, so SLF4J silently dropped it. Logging the
                    // exception itself preserves both message and stack trace.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry collection on top of {@link #traceEnabledPublisher}: on success, latency
 * and request-charge histograms are updated for item and batch responses; on error, the same data
 * is recorded from the {@link CosmosException}.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata for span parenting / nested-call detection.
 * @param spanName the operation name for the span.
 * @param containerId the target container id reported to telemetry.
 * @param databaseId the target database id.
 * @param endpoint the service endpoint recorded on the span.
 * @param client the client whose telemetry settings are consulted.
 * @param consistencyLevel the requested consistency; may be {@code null}.
 * @param operationType the operation kind reported to telemetry.
 * @param resourceType the resource kind reported to telemetry.
 * @param statusCodeFunc extracts the status code from a response.
 * @param diagnosticFunc extracts diagnostics from a response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics to the span.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry toggle once per signal instead of once per branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Plain cast: CosmosBatchResponse is non-generic, so no unchecked warning arises
                // and the original @SuppressWarnings("unchecked") was unnecessary.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown on failure, hence the null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records latency and request-charge measurements for a completed operation into the client
 * telemetry histograms, creating each histogram on first use for its report-payload key.
 *
 * @param cosmosAsyncClient the client whose telemetry store is updated.
 * @param cosmosDiagnostics diagnostics supplying the operation duration and regions contacted.
 * @param statusCode the HTTP status code of the operation.
 * @param objectSize the payload size in bytes; {@code null} when unknown (e.g. on failure).
 * @param containerId the target container id.
 * @param databaseId the target database id.
 * @param operationType the operation kind.
 * @param resourceType the resource kind.
 * @param consistencyLevel the requested consistency; may be {@code null}.
 * @param requestCharge the RU charge of the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
    // Latency histogram, keyed by the full report payload (operation/resource/status/...).
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram != null) {
        // Duration is recorded in microseconds (toNanos()/1000).
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
    } else {
        // Success and failure use different histogram precisions.
        // NOTE(review): this get-then-put sequence is not atomic; concurrent first recordings for
        // the same payload could each create a histogram and one sample may be lost — confirm the
        // map type and whether computeIfAbsent should be used instead.
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
        } else {
            latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        }
        latencyHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    // Request-charge histogram, same keying scheme and same lazy-creation pattern.
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram != null) {
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
    } else {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
}
/**
 * Builds the {@link ReportPayload} used as the lookup key for client-telemetry histograms for a
 * given operation/metric combination.
 *
 * @param cosmosAsyncClient used to resolve the client's default consistency level.
 * @param cosmosDiagnostics supplies the regions contacted.
 * @param statusCode the HTTP status code of the operation.
 * @param objectSize payload size in bytes; the greater-than-1KB flag is only set when non-null.
 * @param containerId the target container id.
 * @param databaseId the target database id.
 * @param operationType the operation kind.
 * @param resourceType the resource kind.
 * @param consistencyLevel the requested consistency; falls back to the client default when null.
 * @param metricsName the metric this payload keys (latency or request charge).
 * @param unitName the unit of the metric.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's default consistency when none was supplied for the operation.
    ConsistencyLevel effectiveConsistency = consistencyLevel;
    if (effectiveConsistency == null) {
        effectiveConsistency = BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    }
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of {@link CosmosDiagnostics} to the current tracing span as timestamped
 * events — one event per diagnostics section — each carrying that section serialized as JSON
 * under the {@code JSON} attribute.
 * <p>
 * Where a request timeline is available, the timestamp of its "created" entry is used as the
 * event time; otherwise the request/response (or request start) time is used.
 *
 * @param cosmosDiagnostics the diagnostics to record; the method is a no-op when {@code null}.
 * @param context the tracing context identifying the span to attach events to.
 * @throws JsonProcessingException if a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes = null;
    int diagnosticsCounter = 1;
    // One "StoreResponseN" event per store response.
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() can throw CosmosException; the timeline is then taken from the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
            , ZoneOffset.UTC);
        if (eventIterator != null) {
            // Prefer the "created" timeline entry as the event timestamp when present.
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    // Supplemental store responses, capped via getCappedSupplementalResponseStatisticsList.
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
            ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            Iterator<RequestTimeline.Event> eventIterator =
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    // Gateway statistics, when present.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            Iterator<RequestTimeline.Event> eventIterator =
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
            while (eventIterator.hasNext()) {
                RequestTimeline.Event event = eventIterator.next();
                if (event.getName().equals("created")) {
                    requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                    break;
                }
            }
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    // Retry information, only when at least one retry was started.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    // One event per address-resolution call.
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    // Serialization timings, one event per serialization step, when collected.
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // Always-attached summary events, all stamped with the request start time.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} |
If we remove ERROR_TYPE/ERROR_MSG then we wont be seeing below attributes which was the initial requirement from @trask  | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
// Underlying Azure Core tracer; may be null, in which case tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared JSON serializer for diagnostics payloads attached to tracing events.
private static final ObjectMapper mapper = new ObjectMapper();
// Event-attribute key under which serialized diagnostics JSON is stored.
private final static String JSON_STRING = "JSON";
// Standard span attribute names/values for database operations.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Span attribute names recording a failure's message and exception class.
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
// Context key/value marking nested SDK calls so only the outermost call creates a span.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a publisher terminates with an error before a response is available.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are attached to the span only when an operation exceeds these latency thresholds.
// NOTE(review): these are public instance (non-static) constants — presumably intentional; confirm.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a TracerProvider backed by the given tracer.
 *
 * @param tracer the Azure Core tracer to delegate to; {@code null} disables tracing.
 */
public TracerProvider(Tracer tracer) {
    this.tracer = tracer;
}
/**
 * @return whether a tracer is configured and spans/events will be produced.
 */
public boolean isEnabled() {
    return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
    public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
        Context local = Objects.requireNonNull(context, "'context' cannot be null.");
        // Tag the context with the resource-provider namespace before starting the span.
        local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
        local = tracer.start(methodName, local);
        // The database id is optional; only record it when the caller supplied one.
        if (databaseId != null) {
            tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
        }
        tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
        tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
        // The span name (method name) doubles as the db.statement attribute.
        tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
        return local;
    }
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
    public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
        // Thin delegate. NOTE(review): tracer may be null when tracing is disabled — callers
        // appear to guard with isEnabled() before reaching here; confirm.
        tracer.addEvent(name, attributes, timestamp, context);
    }
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
    /**
     * Wraps a batch-response publisher with tracing and, when enabled on the client,
     * client-telemetry collection. The service endpoint is resolved from the client.
     */
    public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                        Context context,
                                                                        String spanName,
                                                                        String containerId,
                                                                        String databaseId,
                                                                        CosmosAsyncClient client,
                                                                        ConsistencyLevel consistencyLevel,
                                                                        OperationType operationType,
                                                                        ResourceType resourceType) {
        return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
            BridgeInternal.getServiceEndpoint(client),
            client,
            consistencyLevel,
            operationType,
            resourceType,
            CosmosBatchResponse::getStatusCode,
            CosmosBatchResponse::getDiagnostics,
            null);
    }
    /**
     * Wraps an item-response publisher with tracing and, when enabled on the client,
     * client-telemetry collection. {@code thresholdForDiagnosticsOnTracer} controls when
     * diagnostics are attached to the span as events.
     */
    public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                                   Context context,
                                                                                   String spanName,
                                                                                   String containerId,
                                                                                   String databaseId,
                                                                                   CosmosAsyncClient client,
                                                                                   ConsistencyLevel consistencyLevel,
                                                                                   OperationType operationType,
                                                                                   ResourceType resourceType,
                                                                                   Duration thresholdForDiagnosticsOnTracer) {
        return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
            BridgeInternal.getServiceEndpoint(client),
            client,
            consistencyLevel,
            operationType,
            resourceType,
            CosmosItemResponse::getStatusCode,
            CosmosItemResponse::getDiagnostics,
            thresholdForDiagnosticsOnTracer);
    }
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
    /**
     * Adds client-telemetry collection on top of the traced publisher.
     * <p>
     * When client telemetry is enabled, item and batch responses are recorded into latency /
     * request-charge histograms on success; {@code CosmosException}s are recorded on the error
     * path (with a {@code null} payload size).
     *
     * @param resultPublisher the publisher to instrument.
     * @param context call metadata carrying the parent span, if any.
     * @param spanName name of the span to create.
     * @param containerId container id reported in telemetry.
     * @param databaseId database id reported in telemetry.
     * @param endpoint service endpoint recorded on the span.
     * @param client the client whose telemetry settings and sink are used.
     * @param consistencyLevel consistency level reported in telemetry; may be {@code null}.
     * @param operationType operation type reported in telemetry.
     * @param resourceType resource type reported in telemetry.
     * @param statusCodeFunc extracts the status code from the emitted value.
     * @param diagnosticFunc extracts the diagnostics from the emitted value.
     * @param thresholdForDiagnosticsOnTracer threshold for attaching diagnostics to the span.
     * @return the instrumented publisher.
     */
    private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                     Context context,
                                                     String spanName,
                                                     String containerId,
                                                     String databaseId,
                                                     String endpoint,
                                                     CosmosAsyncClient client,
                                                     ConsistencyLevel consistencyLevel,
                                                     OperationType operationType,
                                                     ResourceType resourceType,
                                                     Function<T, Integer> statusCodeFunc,
                                                     Function<T, CosmosDiagnostics> diagnosticFunc,
                                                     Duration thresholdForDiagnosticsOnTracer) {
        // Tracing is layered first; telemetry hooks observe the already-traced publisher.
        Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
        return tracerMono
            .doOnSuccess(response -> {
                if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
                    @SuppressWarnings("unchecked")
                    CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                    fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                        ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) itemResponse.getRequestCharge());
                } else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
                    @SuppressWarnings("unchecked")
                    CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                    fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                        ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) cosmosBatchResponse.getRequestCharge());
                }
            }).doOnError(throwable -> {
                if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
                    // On failure, record the exception's own diagnostics; no payload size is known.
                    CosmosException cosmosException = (CosmosException) throwable;
                    fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                        null, containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) cosmosException.getRequestCharge());
                }
            });
    }
    /**
     * Records a completed operation into the client-telemetry latency and request-charge
     * histograms, creating each histogram on first use for a given payload shape.
     *
     * @param cosmosAsyncClient client whose telemetry sink receives the samples.
     * @param cosmosDiagnostics diagnostics whose duration is recorded as the latency.
     * @param statusCode HTTP status code of the operation.
     * @param objectSize payload size in bytes; may be {@code null} (e.g. on the error path).
     * @param containerId container id reported in the payload.
     * @param databaseId database id reported in the payload.
     * @param operationType operation type reported in the payload.
     * @param resourceType resource type reported in the payload.
     * @param consistencyLevel consistency level reported; may be {@code null}.
     * @param requestCharge request charge recorded into the charge histogram.
     */
    private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                     CosmosDiagnostics cosmosDiagnostics,
                                     int statusCode,
                                     Integer objectSize,
                                     String containerId,
                                     String databaseId,
                                     OperationType operationType,
                                     ResourceType resourceType,
                                     ConsistencyLevel consistencyLevel,
                                     float requestCharge) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
            statusCode, objectSize, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
            ClientTelemetry.REQUEST_LATENCY_UNIT);
        // NOTE(review): get-then-put below is not atomic; concurrent first samples for the same
        // payload shape could each create a histogram and one would be lost — confirm the map's
        // concurrency expectations or use computeIfAbsent.
        ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            // Latency is recorded in microseconds (nanos / 1000).
            ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        } else {
            // First sample for this payload shape; success and failure use different precisions.
            if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }
            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }
        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
            statusCode, objectSize, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }
    /**
     * Builds the {@code ReportPayload} that keys telemetry samples by operation shape
     * (metric, database/container, operation, resource, status code, consistency, size bucket).
     *
     * @param consistencyLevel explicit level, or {@code null} to fall back to the client's level.
     * @param objectSize payload size in bytes; when non-null, only a "greater than 1 KB" flag is kept.
     * @return the populated payload used as the histogram map key.
     */
    private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                              CosmosDiagnostics cosmosDiagnostics,
                                              int statusCode,
                                              Integer objectSize,
                                              String containerId,
                                              String databaseId,
                                              OperationType operationType,
                                              ResourceType resourceType,
                                              ConsistencyLevel consistencyLevel,
                                              String metricsName,
                                              String unitName) {
        ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
        reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
        // Fall back to the client's configured consistency when none was supplied.
        reportPayload.setConsistency(consistencyLevel == null ?
            BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
            consistencyLevel);
        if (objectSize != null) {
            reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
        }
        reportPayload.setDatabaseName(databaseId);
        reportPayload.setContainerName(containerId);
        reportPayload.setOperation(operationType);
        reportPayload.setResource(resourceType);
        reportPayload.setStatusCode(statusCode);
        return reportPayload;
    }
    /**
     * Attaches the pieces of a {@code CosmosDiagnostics} instance to the current tracing span as
     * timestamped events, each carrying the corresponding statistics serialized as JSON under the
     * {@code JSON_STRING} attribute.
     *
     * @param cosmosDiagnostics the diagnostics to record; no-op when {@code null}.
     * @param context the call metadata identifying the span to attach events to.
     * @throws JsonProcessingException if any statistics object cannot be serialized to JSON.
     */
    private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
        if (cosmosDiagnostics == null) {
            return;
        }
        ClientSideRequestStatistics clientSideRequestStatistics =
            BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
        Map<String, Object> attributes = null;
        int diagnosticsCounter = 1;
        // One "StoreResponse<n>" event per store response.
        for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
            clientSideRequestStatistics.getResponseStatisticsList()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
            Iterator<RequestTimeline.Event> eventIterator = null;
            try {
                if (storeResponseStatistics.getStoreResult() != null) {
                    eventIterator =
                        DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
                }
            } catch (CosmosException ex) {
                // toResponse() can rethrow the stored failure; fall back to its timeline.
                eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
            }
            // Default to the request/response time; refine to the timeline's "created" event when present.
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
                , ZoneOffset.UTC);
            if (eventIterator != null) {
                while (eventIterator.hasNext()) {
                    RequestTimeline.Event event = eventIterator.next();
                    if (event.getName().equals("created")) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }
        diagnosticsCounter = 1;
        // Supplemental (capped) store responses get their own numbered event series.
        for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
            ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
                ZoneOffset.UTC);
            if (statistics.getStoreResult() != null) {
                Iterator<RequestTimeline.Event> eventIterator =
                    DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
                while (eventIterator.hasNext()) {
                    RequestTimeline.Event event = eventIterator.next();
                    if (event.getName().equals("created")) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }
        if (clientSideRequestStatistics.getGatewayStatistics() != null) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING,
                mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
            OffsetDateTime requestStartTime =
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
            if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
                Iterator<RequestTimeline.Event> eventIterator =
                    clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
                while (eventIterator.hasNext()) {
                    RequestTimeline.Event event = eventIterator.next();
                    if (event.getName().equals("created")) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
        }
        if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING,
                mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
            this.addEvent("Retry Context", attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                    ZoneOffset.UTC), context);
        }
        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
            clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
            this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
                OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
        }
        if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
            for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
                clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
                attributes = new HashMap<>();
                attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
                this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                    OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
            }
        }
        // The remaining one-off events are all stamped with the overall request start time.
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
        this.addEvent("RegionContacted", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
        this.addEvent("SystemInformation", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
        this.addEvent("ClientCfgs", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    }
} | class TracerProvider {
    // Underlying tracer implementation; null when tracing is disabled (see isEnabled()).
    private Tracer tracer;
    private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
    // Shared JSON serializer used to attach diagnostics payloads to span events; ObjectMapper is thread-safe.
    private static final ObjectMapper mapper = new ObjectMapper();
    // Attribute key under which serialized diagnostics JSON is stored on span events.
    private final static String JSON_STRING = "JSON";
    public final static String DB_TYPE_VALUE = "Cosmos";
    public final static String DB_TYPE = "db.type";
    public final static String DB_INSTANCE = "db.instance";
    public final static String DB_URL = "db.url";
    public static final String DB_STATEMENT = "db.statement";
    // Context key marking a nested traced call so only the outermost call opens a span.
    public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
    public static final String COSMOS_CALL_DEPTH_VAL = "nested";
    // Status code reported to endSpan when a publisher terminates with an error.
    public static final int ERROR_CODE = 0;
    public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
    // Diagnostics taking longer than these thresholds are attached to the span as events.
    public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
    public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
    /**
     * Creates a TracerProvider delegating to the given tracer implementation.
     *
     * @param tracer the tracer to delegate to; may be {@code null}, in which case tracing is
     * disabled (see {@link #isEnabled()}).
     */
    public TracerProvider(Tracer tracer) {
        this.tracer = tracer;
    }
    /**
     * Indicates whether tracing is active for this provider.
     *
     * @return {@code true} when a tracer implementation was supplied at construction.
     */
    public boolean isEnabled() {
        return tracer != null;
    }
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
    public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
        Context local = Objects.requireNonNull(context, "'context' cannot be null.");
        // Tag the context with the resource-provider namespace before starting the span.
        local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
        local = tracer.start(methodName, local);
        // The database id is optional; only record it when the caller supplied one.
        if (databaseId != null) {
            tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
        }
        tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
        tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
        // The span name (method name) doubles as the db.statement attribute.
        tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
        return local;
    }
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
    public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
        // Thin delegate. NOTE(review): tracer may be null when tracing is disabled — callers
        // appear to guard with isEnabled() before reaching here; confirm.
        tracer.addEvent(name, attributes, timestamp, context);
    }
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
    /**
     * Wraps a batch-response publisher with tracing and, when enabled on the client,
     * client-telemetry collection. The service endpoint is resolved from the client.
     */
    public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
                                                                        Context context,
                                                                        String spanName,
                                                                        String containerId,
                                                                        String databaseId,
                                                                        CosmosAsyncClient client,
                                                                        ConsistencyLevel consistencyLevel,
                                                                        OperationType operationType,
                                                                        ResourceType resourceType) {
        return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
            BridgeInternal.getServiceEndpoint(client),
            client,
            consistencyLevel,
            operationType,
            resourceType,
            CosmosBatchResponse::getStatusCode,
            CosmosBatchResponse::getDiagnostics,
            null);
    }
    /**
     * Wraps an item-response publisher with tracing and, when enabled on the client,
     * client-telemetry collection. {@code thresholdForDiagnosticsOnTracer} controls when
     * diagnostics are attached to the span as events.
     */
    public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
                                                                                   Context context,
                                                                                   String spanName,
                                                                                   String containerId,
                                                                                   String databaseId,
                                                                                   CosmosAsyncClient client,
                                                                                   ConsistencyLevel consistencyLevel,
                                                                                   OperationType operationType,
                                                                                   ResourceType resourceType,
                                                                                   Duration thresholdForDiagnosticsOnTracer) {
        return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
            BridgeInternal.getServiceEndpoint(client),
            client,
            consistencyLevel,
            operationType,
            resourceType,
            CosmosItemResponse::getStatusCode,
            CosmosItemResponse::getDiagnostics,
            thresholdForDiagnosticsOnTracer);
    }
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
    /**
     * Adds client-telemetry collection on top of the traced publisher.
     * <p>
     * When client telemetry is enabled, item and batch responses are recorded into latency /
     * request-charge histograms on success; {@code CosmosException}s are recorded on the error
     * path (with a {@code null} payload size).
     *
     * @param resultPublisher the publisher to instrument.
     * @param context call metadata carrying the parent span, if any.
     * @param spanName name of the span to create.
     * @param containerId container id reported in telemetry.
     * @param databaseId database id reported in telemetry.
     * @param endpoint service endpoint recorded on the span.
     * @param client the client whose telemetry settings and sink are used.
     * @param consistencyLevel consistency level reported in telemetry; may be {@code null}.
     * @param operationType operation type reported in telemetry.
     * @param resourceType resource type reported in telemetry.
     * @param statusCodeFunc extracts the status code from the emitted value.
     * @param diagnosticFunc extracts the diagnostics from the emitted value.
     * @param thresholdForDiagnosticsOnTracer threshold for attaching diagnostics to the span.
     * @return the instrumented publisher.
     */
    private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                     Context context,
                                                     String spanName,
                                                     String containerId,
                                                     String databaseId,
                                                     String endpoint,
                                                     CosmosAsyncClient client,
                                                     ConsistencyLevel consistencyLevel,
                                                     OperationType operationType,
                                                     ResourceType resourceType,
                                                     Function<T, Integer> statusCodeFunc,
                                                     Function<T, CosmosDiagnostics> diagnosticFunc,
                                                     Duration thresholdForDiagnosticsOnTracer) {
        // Tracing is layered first; telemetry hooks observe the already-traced publisher.
        Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
        return tracerMono
            .doOnSuccess(response -> {
                if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
                    @SuppressWarnings("unchecked")
                    CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                    fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                        ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) itemResponse.getRequestCharge());
                } else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
                    @SuppressWarnings("unchecked")
                    CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                    fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                        ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) cosmosBatchResponse.getRequestCharge());
                }
            }).doOnError(throwable -> {
                if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
                    // On failure, record the exception's own diagnostics; no payload size is known.
                    CosmosException cosmosException = (CosmosException) throwable;
                    fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                        null, containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) cosmosException.getRequestCharge());
                }
            });
    }
    /**
     * Records a completed operation into the client-telemetry latency and request-charge
     * histograms, creating each histogram on first use for a given payload shape.
     *
     * @param cosmosAsyncClient client whose telemetry sink receives the samples.
     * @param cosmosDiagnostics diagnostics whose duration is recorded as the latency.
     * @param statusCode HTTP status code of the operation.
     * @param objectSize payload size in bytes; may be {@code null} (e.g. on the error path).
     * @param containerId container id reported in the payload.
     * @param databaseId database id reported in the payload.
     * @param operationType operation type reported in the payload.
     * @param resourceType resource type reported in the payload.
     * @param consistencyLevel consistency level reported; may be {@code null}.
     * @param requestCharge request charge recorded into the charge histogram.
     */
    private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                     CosmosDiagnostics cosmosDiagnostics,
                                     int statusCode,
                                     Integer objectSize,
                                     String containerId,
                                     String databaseId,
                                     OperationType operationType,
                                     ResourceType resourceType,
                                     ConsistencyLevel consistencyLevel,
                                     float requestCharge) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
            statusCode, objectSize, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
            ClientTelemetry.REQUEST_LATENCY_UNIT);
        // NOTE(review): get-then-put below is not atomic; concurrent first samples for the same
        // payload shape could each create a histogram and one would be lost — confirm the map's
        // concurrency expectations or use computeIfAbsent.
        ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            // Latency is recorded in microseconds (nanos / 1000).
            ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        } else {
            // First sample for this payload shape; success and failure use different precisions.
            if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }
            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }
        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
            statusCode, objectSize, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }
    /**
     * Builds the {@code ReportPayload} that keys telemetry samples by operation shape
     * (metric, database/container, operation, resource, status code, consistency, size bucket).
     *
     * @param consistencyLevel explicit level, or {@code null} to fall back to the client's level.
     * @param objectSize payload size in bytes; when non-null, only a "greater than 1 KB" flag is kept.
     * @return the populated payload used as the histogram map key.
     */
    private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                              CosmosDiagnostics cosmosDiagnostics,
                                              int statusCode,
                                              Integer objectSize,
                                              String containerId,
                                              String databaseId,
                                              OperationType operationType,
                                              ResourceType resourceType,
                                              ConsistencyLevel consistencyLevel,
                                              String metricsName,
                                              String unitName) {
        ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
        reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
        // Fall back to the client's configured consistency when none was supplied.
        reportPayload.setConsistency(consistencyLevel == null ?
            BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
            consistencyLevel);
        if (objectSize != null) {
            reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
        }
        reportPayload.setDatabaseName(databaseId);
        reportPayload.setContainerName(containerId);
        reportPayload.setOperation(operationType);
        reportPayload.setResource(resourceType);
        reportPayload.setStatusCode(statusCode);
        return reportPayload;
    }
/**
 * Attaches the sections of the Cosmos diagnostics (direct-mode store responses, supplemental
 * store responses, gateway statistics, retry context, address resolution, serialization
 * diagnostics, contacted regions, system information and client configuration) as individual
 * events on the current tracing span. Each event carries the section serialized to JSON under
 * the {@code JSON} attribute and is timestamped with the best-known start time of that section.
 *
 * @param cosmosDiagnostics diagnostics to report; a null value is silently ignored.
 * @param context tracing context holding the span the events are attached to.
 * @throws JsonProcessingException if any diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    // Direct-mode store responses, one event per response.
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() may surface the failure as a CosmosException; its timeline is still usable.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (eventIterator != null) {
            requestStartTime = startTimeFromCreatedEvent(eventIterator, requestStartTime);
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    // Supplemental store responses (capped upstream to avoid unbounded event counts).
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = startTimeFromCreatedEvent(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    // Gateway-mode statistics, if the request went through the gateway.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = startTimeFromCreatedEvent(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    // Retry context, only when at least one retry happened.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    // Address-resolution calls, one event per resolution.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    // Serialization diagnostics, one event per serialization type.
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // Environment/summary events, all stamped with the request start time.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time;
 * returns {@code fallback} when no such event exists. Replaces three copy-pasted
 * while-loops in {@code addDiagnosticsOnTracerEvent}.
 */
private static OffsetDateTime startTimeFromCreatedEvent(Iterator<RequestTimeline.Event> eventIterator,
                                                        OffsetDateTime fallback) {
    while (eventIterator.hasNext()) {
        RequestTimeline.Event event = eventIterator.next();
        if (event.getName().equals("created")) {
            return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
        }
    }
    return fallback;
}
} |
If we remove ERROR_TYPE/ERROR_MSG, we won't see the attributes below, which was the initial requirement from @trask. | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider backed by the given tracer; a null tracer disables all tracing
 * (see {@link #isEnabled()}).
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Returns true when a tracer was supplied; callers must check this before invoking
 * any span-manipulating method on this provider.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
/**
 * Starts a new tracing span named after the invoked SDK method and stamps the
 * standard Cosmos attributes on it: db.type, db.url, db.statement and, when the
 * database is known, db.instance.
 *
 * @param methodName SDK operation name; used both as span name and db.statement.
 * @param databaseId database the operation targets; the attribute is skipped when null.
 * @param endpoint service endpoint recorded as db.url.
 * @param context call metadata; must not be null.
 * @return the context carrying the newly started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Context spanContext = tracer.start(
        methodName,
        context.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME));
    if (databaseId != null) {
        tracer.setAttribute(DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(DB_URL, endpoint, spanContext);
    tracer.setAttribute(DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// Thin pass-through to the configured tracer.
// NOTE(review): assumes a tracer is configured; callers appear to guard with isEnabled() - confirm.
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
/**
 * Ends the span carried by {@code context} based on the terminal {@link Signal}.
 * On completion the span ends with the supplied status code; on error the throwable
 * is attached and, for {@link CosmosException}s, the status code is taken from the
 * exception instead. Other signal types are ignored.
 *
 * @param context call metadata holding the span; must not be null.
 * @param signal terminal signal; must not be null.
 * @param statusCode status code to report unless overridden by a CosmosException.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            // null instanceof CosmosException is false, so a signal without an error
            // falls through with the caller-supplied status code.
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            break;
    }
}
/**
 * Instruments a {@link CosmosResponse} publisher with tracing, using the response's
 * own status code and diagnostics and the default CRUD diagnostics threshold.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a batch-operation publisher with tracing and, when client telemetry is
 * enabled, records latency/request-charge histograms for the batch response.
 * Delegates to {@code publisherWithClientTelemetry} with the batch response's own
 * status code and diagnostics and the default CRUD diagnostics threshold.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
/**
 * Instruments an item-operation publisher with tracing and, when client telemetry is
 * enabled, records latency/request-charge histograms for the item response.
 *
 * @param thresholdForDiagnosticsOnTracer latency above which full diagnostics are attached
 *        to the span; null means the default CRUD threshold.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps {@code resultPublisher} so that a tracing span is started on subscription and ended
 * on success or error. Nested SDK calls (flagged via {@code COSMOS_CALL_DEPTH} in the
 * context) are not traced again. On success, if the operation's diagnostics duration
 * exceeds the threshold (default {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}), the full
 * diagnostics are attached to the span as events.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata; may carry a parent span and the nested-call marker.
 * @param spanName name of the span to start.
 * @param databaseId database name recorded on the span; may be null.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the HTTP status code from the result.
 * @param diagnosticFunc extracts the diagnostics from the result.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached;
 *        null selects the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // FIX: the previous call passed ex.getMessage() as an SLF4J argument with no
                    // {} placeholder, so the message was silently dropped from the log output.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry recording on top of {@code traceEnabledPublisher}. On success,
 * item and batch responses are recorded into latency/request-charge histograms; on error,
 * CosmosExceptions are recorded with a null payload size. The telemetry-enabled check is
 * evaluated per signal so runtime configuration changes are honored.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
// Failures still contribute to telemetry, but payload size is unknown (null).
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
/**
 * Records a single operation into the client-telemetry histograms: request latency
 * (diagnostics duration in microseconds) and request charge (RU). Histograms are created
 * lazily on first sight of a given payload key; success and failure latencies use
 * different histogram precisions.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// toNanos()/1000 converts the diagnostics duration to microseconds.
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
// NOTE(review): get-then-put on the operation-info map is not atomic; two threads
// reporting the same new payload concurrently could each create a histogram and one
// set of recorded values may be lost - confirm the intended concurrency model.
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} that keys one client-telemetry histogram for a given
 * metric (latency or request charge) of a single operation. When no request-level
 * consistency is supplied, the client's configured default is reported instead.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    ConsistencyLevel effectiveConsistency = consistencyLevel;
    if (effectiveConsistency == null) {
        effectiveConsistency = BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    }
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of the Cosmos diagnostics (store responses, supplemental store
 * responses, gateway statistics, retry context, address resolution, serialization
 * diagnostics, contacted regions, system information and client configuration) as
 * individual events on the current tracing span. Each event carries the section
 * serialized to JSON under the {@code JSON} attribute; where a request timeline exposes
 * a "created" event, its start time is used as the event timestamp.
 *
 * @param cosmosDiagnostics diagnostics to report; a null value is silently ignored.
 * @param context tracing context holding the span the events are attached to.
 * @throws JsonProcessingException if any diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
// Direct-mode store responses, one event per response.
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
// toResponse() may surface the failure as a CosmosException; its timeline is still usable.
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
// Prefer the timeline's "created" event start time over the response time when available.
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Supplemental store responses (capped upstream to avoid unbounded event counts).
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Gateway-mode statistics, if the request went through the gateway.
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
// Retry context, only when at least one retry happened.
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
// Address-resolution calls, one event per resolution.
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
// Serialization diagnostics, one event per serialization type.
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
// Environment/summary events, all stamped with the request start time.
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps {@code resultPublisher} so that a tracing span is started on subscription and ended
 * on success or error. Nested SDK calls (flagged via {@code COSMOS_CALL_DEPTH} in the
 * context) are not traced again. On success, if the operation's diagnostics duration
 * exceeds the threshold (default {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}), the full
 * diagnostics are attached to the span as events.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata; may carry a parent span and the nested-call marker.
 * @param spanName name of the span to start.
 * @param databaseId database name recorded on the span; may be null.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the HTTP status code from the result.
 * @param diagnosticFunc extracts the diagnostics from the result.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached;
 *        null selects the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // FIX: the previous call passed ex.getMessage() as an SLF4J argument with no
                    // {} placeholder, so the message was silently dropped from the log output.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Wraps {@code resultPublisher} with tracing (via {@link #traceEnabledPublisher}) and, when
 * client telemetry is enabled, reports latency/request-charge metrics for item and batch
 * responses on success and for {@link CosmosException}s on error.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context tracing context for the current call chain.
 * @param spanName name of the span to open.
 * @param containerId id of the container the operation targets.
 * @param databaseId id of the database the operation targets.
 * @param endpoint service endpoint recorded on the span.
 * @param client the async client issuing the operation.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType the Cosmos operation type being performed.
 * @param resourceType the Cosmos resource type being operated on.
 * @param statusCodeFunc extracts the status code from the response.
 * @param diagnosticFunc extracts the diagnostics from the response.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics go onto the span.
 * @param <T> response type.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
    Context context,
    String spanName,
    String containerId,
    String databaseId,
    String endpoint,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    Function<T, Integer> statusCodeFunc,
    Function<T, CosmosDiagnostics> diagnosticFunc,
    Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per branch.
            boolean telemetryEnabled =
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (!telemetryEnabled) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            boolean telemetryEnabled =
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Payload length is unknown on the error path, hence null objectSize.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records one latency sample and one request-charge sample into the client-telemetry
 * operation-info map, creating the histograms on first use for each report-payload key.
 *
 * @param cosmosAsyncClient client whose telemetry store receives the samples.
 * @param cosmosDiagnostics diagnostics providing the operation duration.
 * @param statusCode HTTP status code of the operation; selects latency histogram precision.
 * @param objectSize payload size in bytes, or {@code null} when unknown (error path).
 * @param containerId id of the container the operation targets.
 * @param databaseId id of the database the operation targets.
 * @param operationType the Cosmos operation type being reported.
 * @param resourceType the Cosmos resource type being reported.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param requestCharge request units consumed by the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
    CosmosDiagnostics cosmosDiagnostics,
    int statusCode,
    Integer objectSize,
    String containerId,
    String databaseId,
    OperationType operationType,
    ResourceType resourceType,
    ConsistencyLevel consistencyLevel,
    float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    // Latency histogram: create on first use, then record. Success and failure status codes
    // use different precision settings.
    ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
        ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
    if (latencyHistogram == null) {
        boolean isSuccess = statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE;
        latencyHistogram = isSuccess
            ? new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION)
            : new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        latencyHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
    }
    // Duration is recorded in microseconds (nanos / 1000).
    ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos() / 1000);

    // Request-charge histogram: same create-if-absent-then-record pattern.
    ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId
        , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram requestChargeHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
    if (requestChargeHistogram == null) {
        requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        requestChargeHistogram.setAutoResize(true);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
            requestChargeHistogram);
    }
    ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
}
/**
 * Builds the {@link ReportPayload} key/value object describing one telemetry metric
 * (latency or request charge) for a single operation.
 *
 * @param cosmosAsyncClient client used to resolve the default consistency level.
 * @param cosmosDiagnostics diagnostics providing the contacted regions.
 * @param statusCode HTTP status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown.
 * @param containerId id of the container the operation targets.
 * @param databaseId id of the database the operation targets.
 * @param operationType the Cosmos operation type being reported.
 * @param resourceType the Cosmos resource type being reported.
 * @param consistencyLevel consistency level; {@code null} falls back to the client's level.
 * @param metricsName telemetry metric name (e.g. request latency or request charge).
 * @param unitName unit of the metric.
 * @return the populated report payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
    CosmosDiagnostics cosmosDiagnostics,
    int statusCode,
    Integer objectSize,
    String containerId,
    String databaseId,
    OperationType operationType,
    ResourceType resourceType,
    ConsistencyLevel consistencyLevel,
    String metricsName,
    String unitName) {
    ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
    reportPayload.setDatabaseName(databaseId);
    reportPayload.setContainerName(containerId);
    reportPayload.setOperation(operationType);
    reportPayload.setResource(resourceType);
    reportPayload.setStatusCode(statusCode);
    reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's configured consistency level when none was supplied.
    if (consistencyLevel == null) {
        reportPayload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    } else {
        reportPayload.setConsistency(consistencyLevel);
    }
    // Size is only known on success paths; the flag is left unset otherwise.
    if (objectSize != null) {
        reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    return reportPayload;
}
/**
 * Emits the sections of {@code cosmosDiagnostics} as tracer events on the span held in
 * {@code context}: store responses, supplemental store responses, gateway statistics,
 * retry context, address resolution, serialization diagnostics, regions contacted, system
 * information, and client configuration. Each event's attributes carry the section
 * serialized as JSON under {@code JSON_STRING}.
 *
 * @param cosmosDiagnostics diagnostics to export; {@code null} is a no-op.
 * @param context tracing context holding the currently open span.
 * @throws JsonProcessingException if a diagnostics section cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;

    // One "StoreResponse<i>" event per direct-mode store response.
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() throws for failed responses; the timeline still lives on the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(
            storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (eventIterator != null) {
            requestStartTime = startTimeFromCreatedEvent(eventIterator, requestStartTime);
        }
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // One "Supplemental StoreResponse<i>" event per capped supplemental response.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
            ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = startTimeFromCreatedEvent(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }

    // Gateway-mode statistics, when present.
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = startTimeFromCreatedEvent(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }

    // Retry context, when a retry actually happened.
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    // One "AddressResolutionStatistics<i>" event per address-resolution attempt.
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    // Serialization diagnostics, keyed by serialization type.
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // Summary events, all timestamped at the request start.
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);

    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);

    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time; returns
 * {@code fallback} when no such event exists. Replaces three copy-pasted scan loops.
 *
 * @param eventIterator timeline events to scan; consumed up to the "created" event.
 * @param fallback time to return when the timeline has no "created" event.
 * @return the "created" event's start time in UTC, or {@code fallback}.
 */
private static OffsetDateTime startTimeFromCreatedEvent(Iterator<RequestTimeline.Event> eventIterator,
                                                        OffsetDateTime fallback) {
    while (eventIterator.hasNext()) {
        RequestTimeline.Event event = eventIterator.next();
        if (event.getName().equals("created")) {
            return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
        }
    }
    return fallback;
}
} |
// NOTE(review): removed stray non-Java text ("Subsets and Splits" web-page residue)
// that was accidentally appended after the class's closing brace and broke compilation.