comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
I'd get rid of `newContext` and just work with `context` directly. ```suggestion options = options == null ? new DataLakeFileInputStreamOptions() : options; if (options.isUpn() != null) { HttpHeaders headers = new HttpHeaders(); headers.set("x-ms-upn", options.isUpn() ? "true" : "false"); if (context == null) { context = new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers); } else { context = context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers); } } ``` | public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
Context newContext;
options = options == null ? new DataLakeFileInputStreamOptions() : options;
if (options.isUpn() != null) {
HttpHeaders headers = new HttpHeaders();
headers.set("x-ms-upn", options.isUpn() ? "true" : "false");
if (context == null) {
newContext = new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
} else {
newContext = context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
}
} else {
newContext = null;
}
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, newContext);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Transforms.toPathProperties(inputStream.getProperties()));
} | } | public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Transforms.toPathProperties(inputStream.getProperties()));
} | class DataLakeFileClient extends DataLakePathClient {
/**
* Indicates the maximum number of bytes that can be sent in a call to upload.
*/
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
super(pathAsyncClient, blockBlobClient);
this.dataLakeFileAsyncClient = pathAsyncClient;
}
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
return getPathUrl();
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
return getObjectPath();
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
return getObjectName();
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
Context context) {
return StorageImplUtils.blockWithOptionalTimeout(dataLakeFileAsyncClient
.deleteIfExistsWithResponse(options, context), timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
return upload(data, length, false);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
return upload(data, false);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return uploadWithResponse(new FileParallelUploadOptions(data, length).setRequestConditions(requestConditions),
null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return uploadWithResponse(new FileParallelUploadOptions(data).setRequestConditions(requestConditions),
null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
Context context) {
Objects.requireNonNull(options);
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
uploadFromFile(filePath, false);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
DataLakeRequestConditions requestConditions = null;
if (!overwrite) {
if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)
&& exists()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
}
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout) {
Mono<Void> upload = this.dataLakeFileAsyncClient.uploadFromFile(
filePath, parallelTransferOptions, headers, metadata, requestConditions);
try {
StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadFromFileWithResponse(
filePath, parallelTransferOptions, headers, metadata, requestConditions)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
byte[] contentMd5, String leaseId, Duration timeout, Context context) {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
return appendWithResponse(data, fileOffset, length, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length,
BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fbb, fileOffset, length,
appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fluxData = data.toFluxByteBuffer();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fluxData, fileOffset,
data.getLength(), appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fluxData = data.toFluxByteBuffer();
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fluxData, fileOffset,
data.getLength(), appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
return flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions()
.setUncommittedDataRetained(retainUncommittedData)
.setClose(close)
.setPathHttpHeaders(httpHeaders)
.setRequestConditions(requestConditions);
return flushWithResponse(position, flushOptions, timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
Context context) {
Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    public void read(OutputStream stream) {
        // Delegate to the full-response overload: whole file, no retry options, no request conditions.
        readWithResponse(stream, null, null, null, false, null, Context.NONE);
    }
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
getRangeContentMd5, timeout, context);
return Transforms.toFileReadResponse(response);
}, LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public DataLakeFileOpenInputStreamResult openInputStream() {
        // Use default options (entire file, default ETag locking behavior).
        return openInputStream(null);
    }
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
        // Delegate with no additional pipeline context.
        return openInputStream(options, Context.NONE);
    }
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public OutputStream getOutputStream() {
        // Use default output stream options (overwrites any existing file).
        return getOutputStream(null);
    }
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
        // Delegate with no additional pipeline context.
        return getOutputStream(options, null);
    }
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
BlockBlobOutputStreamOptions convertedOptions = Transforms.toBlockBlobOutputStreamOptions(options);
return blockBlobClient.getBlobOutputStream(convertedOptions, context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathProperties readToFile(String filePath) {
        // Do not overwrite an existing destination file by default.
        return readToFile(filePath, false);
    }
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathProperties readToFile(ReadToFileOptions options) {
        // Do not overwrite an existing destination file by default.
        return readToFile(options, false);
    }
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
options.setOpenOptions(openOptions);
}
return readToFileWithResponse(options, null, Context.NONE)
.getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
new BlobDownloadToFileOptions(filePath)
.setRange(Transforms.toBlobRange(range)).setParallelTransferOptions(parallelTransferOptions)
.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
.setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout,
context);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
Context newContext;
options = options == null ? new ReadToFileOptions() : options;
if (options.isUpn() != null) {
HttpHeaders headers = new HttpHeaders();
headers.set("x-ms-upn", options.isUpn() ? "true" : "false");
if (context == null) {
newContext = new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
} else {
newContext = context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
}
} else {
newContext = null;
}
ReadToFileOptions finalOptions = options;
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
new BlobDownloadToFileOptions(finalOptions.getFilePath())
.setRange(Transforms.toBlobRange(finalOptions.getRange()))
.setParallelTransferOptions(finalOptions.getParallelTransferOptions())
.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(finalOptions.getDownloadRetryOptions()))
.setRequestConditions(Transforms.toBlobRequestConditions(finalOptions.getDataLakeRequestConditions()))
.setRetrieveContentRangeMd5(finalOptions.isRangeGetContentMd5())
.setOpenOptions(finalOptions.getOpenOptions()), timeout, newContext);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
}
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
        // Delegate with no request conditions, timeout, or context.
        return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
    }
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
        DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
        Duration timeout, Context context) {
        // Perform the rename through the async client; for each async response, wrap the returned
        // async client in a synchronous DataLakeFileClient backed by a freshly built BlockBlobClient
        // pointing at the destination path.
        Mono<Response<DataLakeFileClient>> response =
            dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
                sourceRequestConditions, destinationRequestConditions, context)
                .map(asyncResponse ->
                    new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                        asyncResponse.getHeaders(),
                        new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                            new SpecializedBlobClientBuilder()
                                .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                                .buildBlockBlobClient())));
        // Block for the result, then re-wrap the client once more via the path-client copy
        // constructor. NOTE(review): this second wrap looks redundant with the map above — confirm
        // whether both are required before simplifying.
        Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
        return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
    }
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
    public InputStream openQueryInputStream(String expression) {
        // Delegate with default query options built from the expression.
        return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
    }
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
FileQueryAsyncResponse response = dataLakeFileAsyncClient.queryWithResponse(queryOptions)
.block();
if (response == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
    public void query(OutputStream stream, String expression) {
        // Delegate to the full-response overload with default options, no timeout, and no context.
        queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
    }
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
BlobQueryResponse response = blockBlobClient.queryWithResponse(
Transforms.toBlobQueryOptions(queryOptions), timeout, context);
return Transforms.toFileQueryResponse(response);
}, LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void scheduleDeletion(FileScheduleDeletionOptions options) {
        // Delegate to the full-response overload with no timeout and no context.
        this.scheduleDeletionWithResponse(options, null, Context.NONE);
    }
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
Duration timeout, Context context) {
Mono<Response<Void>> response = this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
} | class DataLakeFileClient extends DataLakePathClient {
/**
* Indicates the maximum number of bytes that can be sent in a call to upload.
*/
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
    /**
     * Constructs a synchronous file client wrapping the given async client and the underlying
     * block blob client used for data-plane operations.
     *
     * @param pathAsyncClient the {@link DataLakeFileAsyncClient} this client blocks on.
     * @param blockBlobClient the {@link BlockBlobClient} backing blob-layer calls.
     */
    DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
        super(pathAsyncClient, blockBlobClient);
        this.dataLakeFileAsyncClient = pathAsyncClient;
    }
    /**
     * Constructs a file client from an existing path client, reusing its async client and
     * block blob client and re-wrapping the async path client as a file-specific async client.
     *
     * @param dataLakePathClient the path client to convert.
     */
    private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
        super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
        this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
    }
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
    public String getFileUrl() {
        // A file's URL is identical to its path URL.
        return getPathUrl();
    }
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
    public String getFilePath() {
        // Delegates to the generic path accessor on the base class.
        return getObjectPath();
    }
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
    public String getFileName() {
        // Delegates to the generic name accessor on the base class.
        return getObjectName();
    }
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Builds new async and block-blob clients bound to the given CPK (null clears the key).
    return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Unconditional delete with no timeout; getValue() unwraps (and discards) the Void body.
    deleteWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the shared path delete (first argument, 'recursive', is null for a file)
    // and block for at most 'timeout'.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context), timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Default options, no timeout; the Boolean body reports whether the file existed and was deleted.
    return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Start the async delete-if-exists, then block for at most 'timeout'.
    Mono<Response<Boolean>> asyncResponse = dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
    // Default is fail-if-exists (overwrite == false).
    return upload(data, length, false);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
    // Default is fail-if-exists (overwrite == false); length comes from the BinaryData itself.
    return upload(data, false);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    // When not overwriting, an If-None-Match: * precondition makes the service reject the
    // upload if the file already exists.
    DataLakeRequestConditions conditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    FileParallelUploadOptions uploadOptions = new FileParallelUploadOptions(data, length)
        .setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    // When not overwriting, an If-None-Match: * precondition makes the service reject the
    // upload if the file already exists.
    DataLakeRequestConditions conditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    FileParallelUploadOptions uploadOptions = new FileParallelUploadOptions(data)
        .setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
    Context context) {
    Objects.requireNonNull(options);
    // Propagate the caller's Context into the reactor pipeline, then block synchronously.
    Mono<Response<PathInfo>> asyncUpload = dataLakeFileAsyncClient.uploadWithResponse(options);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            asyncUpload.contextWrite(FluxUtil.toReactorContext(context)), timeout);
    } catch (UncheckedIOException e) {
        // Log through the client logger so the failure is recorded consistently before rethrowing.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
    // Default is fail-if-exists (overwrite == false).
    uploadFromFile(filePath, false);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
    if (overwrite) {
        // Overwriting: no preconditions needed.
        uploadFromFile(filePath, null, null, null, null, null);
        return;
    }
    // A chunked upload cannot rely solely on the ETag precondition, so probe for existence
    // up front (and only then — the exists() call is skipped for single-shot uploads).
    boolean chunked = UploadUtils.shouldUploadInChunks(filePath,
        ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER);
    if (chunked && exists()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
    }
    // If-None-Match: * makes the service reject the upload when the file already exists.
    uploadFromFile(filePath, null, null, null,
        new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD), null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout) {
    try {
        // Block on the async file upload, honoring the optional timeout.
        StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.uploadFromFile(filePath, parallelTransferOptions, headers, metadata,
                requestConditions),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Propagate the caller's Context into the reactor pipeline, then block synchronously.
    Mono<Response<PathInfo>> asyncUpload = dataLakeFileAsyncClient
        .uploadFromFileWithResponse(filePath, parallelTransferOptions, headers, metadata, requestConditions)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(asyncUpload, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
    // No MD5 validation, no lease requirement, no timeout by default.
    appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
    // Length is taken from the BinaryData; no MD5 validation, lease, or timeout by default.
    appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    byte[] contentMd5, String leaseId, Duration timeout, Context context) {
    // Translate the legacy parameter list into the options-bag overload and delegate.
    return appendWithResponse(data, fileOffset, length,
        new DataLakeFileAppendOptions().setLeaseId(leaseId).setContentHash(contentMd5).setFlush(null),
        timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Chunk the stream into ByteBuffers sized at the default upload block so the request is replayable.
    Flux<ByteBuffer> bufferedData = Utility.convertStreamToByteBuffer(data, length,
        BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(bufferedData, fileOffset, length, appendOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Bundle the legacy parameters into the options bag used by the async client.
    DataLakeFileAppendOptions legacyOptions = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(data.toFluxByteBuffer(), fileOffset, data.getLength(),
                legacyOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    try {
        // BinaryData supplies both the byte stream and its length; block on the async append.
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(data.toFluxByteBuffer(), fileOffset, data.getLength(),
                appendOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Deprecated entry point; defaults to a non-overwriting flush.
    return flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // Previously a second DataLakeRequestConditions was allocated and the first discarded;
    // mutate the single instance instead, matching the upload(..., overwrite) overloads.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the flush when data already exists.
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Adapt the individual parameters to the options-bag overload and delegate.
    return flushWithResponse(position,
        new DataLakeFileFlushOptions()
            .setUncommittedDataRetained(retainUncommittedData)
            .setClose(close)
            .setPathHttpHeaders(httpHeaders)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Block on the async flush, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context), timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
    // Full-file read: no range, no retry options, no conditions, no range MD5, no timeout.
    readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Translate the DFS-level arguments to their blob equivalents and delegate to the block blob
    // client, mapping any blob-layer exception back to its Data Lake counterpart.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadResponse blobResponse = blockBlobClient.downloadWithResponse(stream,
            Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options),
            Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context);
        return Transforms.toFileReadResponse(blobResponse);
    }, LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
    // Open with default options (ETag locking, whole-file range).
    return openInputStream(null);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    // Delegate with an empty pipeline context.
    return openInputStream(options, Context.NONE);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
    // Default options: an existing file on the service will be overwritten.
    return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // No additional pipeline context.
    return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Map the Data Lake output-stream options onto their block blob equivalent and open there.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Default to failing when the destination file already exists.
    return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Default to failing when the destination file already exists.
    return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    // When overwriting, open the destination so an existing file is truncated rather than
    // triggering a FileAlreadyExistsException; otherwise leave the open options at their default.
    Set<OpenOption> openOptions;
    if (overwrite) {
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        openOptions = null;
    }
    return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // NOTE: this mutates the caller-supplied options object by installing overwrite-friendly
        // open options (create/truncate instead of create-new).
        Set<OpenOption> overwriteOpenOptions = new HashSet<>();
        overwriteOpenOptions.add(StandardOpenOption.CREATE);
        overwriteOpenOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOpenOptions.add(StandardOpenOption.READ);
        overwriteOpenOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(overwriteOpenOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Assemble the blob-level download options from the individual DFS arguments up front,
    // then delegate, converting both the response and any blob exception back to DFS types.
    BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(Transforms.toBlobRange(range))
        .setParallelTransferOptions(parallelTransferOptions)
        .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
        .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
    // Propagate the caller's UPN preference (when present) as a request header on the context.
    final Context upnContext =
        BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(options.getFilePath())
            .setRange(Transforms.toBlobRange(options.getRange()))
            .setParallelTransferOptions(options.getParallelTransferOptions())
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
            .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
            .setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
            .setOpenOptions(options.getOpenOptions());
        Response<BlobProperties> response =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, upnContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // Rename with no request conditions, timeout, or extra context.
    return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Delegate the rename to the async client, then wrap the resulting async client in a
    // synchronous DataLakeFileClient pointed at the destination path.
    Mono<Response<DataLakeFileClient>> response =
        dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                    asyncResponse.getHeaders(),
                    new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                        new SpecializedBlobClientBuilder()
                            .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                            .buildBlockBlobClient())));
    Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    // NOTE(review): resp.getValue() is already a DataLakeFileClient built in the map() above;
    // re-wrapping it here looks redundant -- confirm whether the extra copy is intentional
    // before simplifying.
    return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Run the query with default options derived from the expression alone.
    return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Execute the query through the async client and block for its completion.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the streamed body as a blocking InputStream while preserving the response metadata.
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Stream the query results into the caller-supplied output stream using default options.
    queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Delegate to the blob query API, converting options and response between the two models
    // and translating any blob exception to its Data Lake equivalent.
    return DataLakeImplUtils.returnOrConvertException(() -> Transforms.toFileQueryResponse(
        blockBlobClient.queryWithResponse(Transforms.toBlobQueryOptions(queryOptions), timeout, context)),
        LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Fire the scheduled-deletion call with no timeout and the default context.
    this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Block on the async operation, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context), timeout);
}
} |
Do we need to new up `DataLakeFileInputStreamOptions` here? If it's null shouldn't `options.isUpn()` be null? Could we just make the if check below `if (options != null && options.isUpn() != null) {` | public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
Context newContext;
options = options == null ? new DataLakeFileInputStreamOptions() : options;
if (options.isUpn() != null) {
HttpHeaders headers = new HttpHeaders();
headers.set("x-ms-upn", options.isUpn() ? "true" : "false");
if (context == null) {
newContext = new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
} else {
newContext = context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
}
} else {
newContext = null;
}
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, newContext);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Transforms.toPathProperties(inputStream.getProperties()));
} | options = options == null ? new DataLakeFileInputStreamOptions() : options; | public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Transforms.toPathProperties(inputStream.getProperties()));
} | class DataLakeFileClient extends DataLakePathClient {
/**
 * Indicates the maximum number of bytes that can be sent in a call to upload.
 */
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
// Logger scoped to this client type.
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
// Async counterpart that backs every synchronous operation on this client.
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
// Package-private constructor wiring the async path client and its block blob counterpart.
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
    super(pathAsyncClient, blockBlobClient);
    this.dataLakeFileAsyncClient = pathAsyncClient;
}
// Re-types a generic path client as a file client, reusing its async and blob clients.
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
    super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
    this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
    // A file's URL is simply its path URL.
    return getPathUrl();
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
    // Delegates to the generic path accessor on the base client.
    return getObjectPath();
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
    // Delegates to the generic name accessor on the base client.
    return getObjectName();
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Derive new async and blob clients bound to the given CPK (or none when null).
    return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete unconditionally; the response value is discarded.
    deleteWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async path client and block, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context), timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Default options; returns whether the file existed and was removed.
    return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Block on the async variant, applying the caller-supplied timeout if any.
    Mono<Response<Boolean>> response = dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
    // By default never clobber an existing file.
    final boolean overwrite = false;
    return upload(data, length, overwrite);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
    // By default never clobber an existing file.
    final boolean overwrite = false;
    return upload(data, overwrite);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    // An ETag wildcard "if-none-match" condition makes the service reject the write
    // when the file already exists, implementing the no-overwrite behavior.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions = new FileParallelUploadOptions(data, length)
        .setRequestConditions(requestConditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    // An ETag wildcard "if-none-match" condition makes the service reject the write
    // when the file already exists, implementing the no-overwrite behavior.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions = new FileParallelUploadOptions(data)
        .setRequestConditions(requestConditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
    Context context) {
    Objects.requireNonNull(options);
    // Propagate the caller's context into the reactive pipeline before blocking.
    Mono<Response<PathInfo>> upload = dataLakeFileAsyncClient.uploadWithResponse(options)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Log-and-rethrow keeps I/O failures visible in client logs.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
    // Default to the non-destructive behavior.
    final boolean overwrite = false;
    uploadFromFile(filePath, overwrite);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
    DataLakeRequestConditions requestConditions = null;
    if (!overwrite) {
        // For files large enough to be uploaded in chunks, check up front that the
        // destination does not already exist; otherwise rely on the ETag condition.
        boolean chunked = UploadUtils.shouldUploadInChunks(filePath,
            ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER);
        if (chunked && exists()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
        }
        requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout) {
    // Block on the async upload; surface I/O failures through the client logger.
    Mono<Void> upload = dataLakeFileAsyncClient.uploadFromFile(filePath, parallelTransferOptions, headers,
        metadata, requestConditions);
    try {
        StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Propagate the caller's context into the reactive pipeline before blocking.
    Mono<Response<PathInfo>> upload = dataLakeFileAsyncClient
        .uploadFromFileWithResponse(filePath, parallelTransferOptions, headers, metadata, requestConditions)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
    // Plain append: no options, no timeout.
    DataLakeFileAppendOptions appendOptions = null;
    appendWithResponse(data, fileOffset, length, appendOptions, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
    // Plain append: no MD5 validation, no lease, no timeout.
    byte[] contentMd5 = null;
    String leaseId = null;
    appendWithResponse(data, fileOffset, contentMd5, leaseId, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    byte[] contentMd5, String leaseId, Duration timeout, Context context) {
    // Adapt the individual parameters onto the options-based overload.
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setContentHash(contentMd5)
        .setLeaseId(leaseId)
        .setFlush(null);
    return appendWithResponse(data, fileOffset, length, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Convert the stream into a ByteBuffer flux in upload-block-sized chunks for the async client.
    Flux<ByteBuffer> buffers = Utility.convertStreamToByteBuffer(data, length,
        BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(buffers, fileOffset, length, appendOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    // Adapt the individual parameters onto the options-based overload, mirroring the
    // InputStream variant of this method, instead of duplicating the flux-conversion
    // and blocking logic here. The delegate performs the non-null check on 'data'.
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    return appendWithResponse(data, fileOffset, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Hand the data to the async client as a ByteBuffer flux and block on the result.
    Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(
        data.toFluxByteBuffer(), fileOffset, data.getLength(), appendOptions, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Retained for backward compatibility; equivalent to flush(position, false).
    final boolean overwrite = false;
    return flush(position, overwrite);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // Build the request conditions once: the original allocated a default instance and
    // then unconditionally replaced it in the !overwrite branch. An ETag wildcard
    // "if-none-match" condition makes the service reject the flush when not overwriting.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Bundle the individual parameters into the options bag used by the newer overload.
    return flushWithResponse(position,
        new DataLakeFileFlushOptions()
            .setUncommittedDataRetained(retainUncommittedData)
            .setClose(close)
            .setPathHttpHeaders(httpHeaders)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Delegate to the async client and block, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context), timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
    // Whole-file read: no range, retry options, or request conditions.
    final boolean rangeGetContentMd5 = false;
    readWithResponse(stream, null, null, null, rangeGetContentMd5, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Delegate to the underlying block blob client, translating the models (and any
    // thrown exception) between the blob and Data Lake types.
    return DataLakeImplUtils.returnOrConvertException(() -> Transforms.toFileReadResponse(
        blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
            Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
            getRangeContentMd5, timeout, context)), LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
    // Null options fall back to the defaults (whole file, ETag locking).
    DataLakeFileInputStreamOptions defaultOptions = null;
    return openInputStream(defaultOptions);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    // No caller-supplied context; use the empty one.
    Context context = Context.NONE;
    return openInputStream(options, context);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
    // Delegate with default options; an existing file will be overwritten.
    return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // Delegate with no additional pipeline context.
    return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Translate the Data Lake options into their blob equivalents and open the
    // write stream directly on the underlying block blob client.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Default to overwrite == false: fails if the destination file already exists.
    return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Default to overwrite == false: fails if the destination file already exists.
    return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // CREATE + TRUNCATE_EXISTING replaces any existing file rather than
        // failing with FileAlreadyExistsException.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    }
    // A null openOptions set keeps the downstream default (create-new, fail if present).
    return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // CREATE + TRUNCATE_EXISTING replaces any existing file rather than
        // failing with FileAlreadyExistsException.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
        // NOTE(review): this dereferences options, so passing a null options with
        // overwrite == true throws NPE here even though the downstream overload
        // tolerates null options — confirm whether that is intended.
        options.setOpenOptions(openOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    return DataLakeImplUtils.returnOrConvertException(() -> {
        // Map every Data Lake parameter onto the equivalent blob download option.
        // Conversion stays inside the lambda so any conversion failure is routed
        // through the same exception-mapping path as the service call.
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> blobResponse =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        // Re-wrap the blob response so callers see Data Lake path properties.
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
    options = options == null ? new ReadToFileOptions() : options;
    if (options.isUpn() != null) {
        // Propagate the user-principal-name preference to the service as the
        // "x-ms-upn" request header via AddHeadersFromContextPolicy.
        HttpHeaders headers = new HttpHeaders();
        headers.set("x-ms-upn", options.isUpn() ? "true" : "false");
        context = (context == null)
            ? new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers)
            : context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
    }
    // Effectively-final copies for capture in the lambda below. Previously the
    // caller-supplied context was replaced with null whenever isUpn() was unset,
    // silently dropping any context the caller passed in; working with the
    // 'context' parameter directly fixes that.
    ReadToFileOptions finalOptions = options;
    Context finalContext = context;
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
            new BlobDownloadToFileOptions(finalOptions.getFilePath())
                .setRange(Transforms.toBlobRange(finalOptions.getRange()))
                .setParallelTransferOptions(finalOptions.getParallelTransferOptions())
                .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(finalOptions.getDownloadRetryOptions()))
                .setRequestConditions(Transforms.toBlobRequestConditions(finalOptions.getDataLakeRequestConditions()))
                .setRetrieveContentRangeMd5(finalOptions.isRangeGetContentMd5())
                .setOpenOptions(finalOptions.getOpenOptions()), timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // Rename with no request conditions, timeout, or pipeline context.
    return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Perform the rename through the async client, then rebuild a synchronous
    // client around the renamed path's async client and block blob client.
    Mono<Response<DataLakeFileClient>> response =
        dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                    asyncResponse.getHeaders(),
                    new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                        new SpecializedBlobClientBuilder()
                            .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                            .buildBlockBlobClient())));
    Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    // NOTE(review): the value is wrapped in a fresh DataLakeFileClient a second
    // time here even though the mapped response already holds one — confirm
    // whether the extra wrap is intentional.
    return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the bare expression in default query options and unwrap the response.
    return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Run the query through the async client and block for the response headers.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the streaming response body as a blocking InputStream.
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Delegate with default serialization options, no timeout, and an empty context.
    queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Delegate to the blob query API, converting the options on the way in and
    // the response on the way out; storage exceptions are mapped to Data Lake ones.
    return DataLakeImplUtils.returnOrConvertException(
        () -> Transforms.toFileQueryResponse(blockBlobClient.queryWithResponse(
            Transforms.toBlobQueryOptions(queryOptions), timeout, context)), LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Delegate with no timeout and an empty context; response is discarded.
    this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Block on the async schedule-deletion call, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context), timeout);
}
} | class DataLakeFileClient extends DataLakePathClient {
/**
 * Indicates the maximum number of bytes that can be sent in a call to upload.
 */
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
// Logger used for exception logging throughout this client.
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
// Async counterpart that most sync methods delegate to before blocking.
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
// Package-private: constructed by builders and sibling clients, not end users.
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
    super(pathAsyncClient, blockBlobClient);
    this.dataLakeFileAsyncClient = pathAsyncClient;
}
// Adapts a generic path client into a file client, reusing its async client
// and block blob client.
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
    super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
    this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
    // A file's URL is simply its path URL on the service.
    return getPathUrl();
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
    // Delegates to the generic path accessor inherited from DataLakePathClient.
    return getObjectPath();
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
    // Delegates to the generic name accessor inherited from DataLakePathClient.
    return getObjectName();
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Rebuild both underlying clients (Data Lake async + block blob) with the
    // new key; this client instance is left unchanged.
    return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete unconditionally. The Response<Void> carries no value, so the
    // previous trailing getValue() call was a no-op and has been dropped.
    deleteWithResponse(null, null, Context.NONE);
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async path client (first argument left null here, as in
    // the async API) and block with the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context), timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Delegate with default delete options; the Boolean value reports whether
    // the file existed and was deleted.
    return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Kick off the async delete-if-exists and block with the optional timeout.
    Mono<Response<Boolean>> response = dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
    // Default to overwrite == false: fails if the file already exists.
    return upload(data, length, false);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
    // Default to overwrite == false: fails if the file already exists.
    return upload(data, false);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    // When overwrite is disabled, an If-None-Match: * condition makes the
    // service reject the upload if the file already exists.
    DataLakeRequestConditions conditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data, length).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    // When overwrite is disabled, an If-None-Match: * condition makes the
    // service reject the upload if the file already exists.
    DataLakeRequestConditions conditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
Context context) {
Objects.requireNonNull(options);
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void uploadFromFile(String filePath) {
        // Delegates with overwrite=false: the upload fails if the file already exists.
        uploadFromFile(filePath, false);
    }
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
DataLakeRequestConditions requestConditions = null;
if (!overwrite) {
if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)
&& exists()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
}
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout) {
Mono<Void> upload = this.dataLakeFileAsyncClient.uploadFromFile(
filePath, parallelTransferOptions, headers, metadata, requestConditions);
try {
StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadFromFileWithResponse(
filePath, parallelTransferOptions, headers, metadata, requestConditions)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void append(InputStream data, long fileOffset, long length) {
        // Delegates with no content-MD5 validation, no lease id, and no timeout.
        appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
    }
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void append(BinaryData data, long fileOffset) {
        // Delegates with no content-MD5 validation, no lease id, and no timeout; the
        // length is taken from the BinaryData by the delegate.
        appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
    }
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
byte[] contentMd5, String leaseId, Duration timeout, Context context) {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
return appendWithResponse(data, fileOffset, length, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length,
BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fbb, fileOffset, length,
appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fluxData = data.toFluxByteBuffer();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fluxData, fileOffset,
data.getLength(), appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fluxData = data.toFluxByteBuffer();
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fluxData, fileOffset,
data.getLength(), appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    @Deprecated
    public PathInfo flush(long position) {
        // Deprecated: delegates with overwrite=false so existing data is never clobbered.
        return flush(position, false);
    }
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions()
.setUncommittedDataRetained(retainUncommittedData)
.setClose(close)
.setPathHttpHeaders(httpHeaders)
.setRequestConditions(requestConditions);
return flushWithResponse(position, flushOptions, timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
Context context) {
Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
    public void read(OutputStream stream) {
        // Full-file read: default range, retry options, and conditions; no range-MD5 validation.
        readWithResponse(stream, null, null, null, false, null, Context.NONE);
    }
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
getRangeContentMd5, timeout, context);
return Transforms.toFileReadResponse(response);
}, LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public DataLakeFileOpenInputStreamResult openInputStream() {
        // Delegates with default options (whole file, ETag locking).
        return openInputStream(null);
    }
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
        // Delegates with an empty context.
        return openInputStream(options, Context.NONE);
    }
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public OutputStream getOutputStream() {
        // Delegates with default options; an existing file on the service is overwritten.
        return getOutputStream(null);
    }
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
    public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
        // Delegates with a null context.
        return getOutputStream(options, null);
    }
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
BlockBlobOutputStreamOptions convertedOptions = Transforms.toBlockBlobOutputStreamOptions(options);
return blockBlobClient.getBlobOutputStream(convertedOptions, context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathProperties readToFile(String filePath) {
        // Delegates with overwrite=false: fails if the destination file already exists.
        return readToFile(filePath, false);
    }
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathProperties readToFile(ReadToFileOptions options) {
        // Delegates with overwrite=false: fails if the destination file already exists.
        return readToFile(options, false);
    }
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathProperties readToFile(String filePath, boolean overwrite) {
        // Null openOptions keeps the default create-new semantics (throws if the file exists).
        Set<OpenOption> openOptions = null;
        if (overwrite) {
            // Allow replacing an existing file: create if absent, truncate if present.
            openOptions = new HashSet<>();
            openOptions.add(StandardOpenOption.CREATE);
            openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
            openOptions.add(StandardOpenOption.READ);
            openOptions.add(StandardOpenOption.WRITE);
        }
        return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
            .getValue();
    }
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // Overwrite semantics: create if absent, truncate if present, open for read/write.
        // NOTE: this mutates the caller-supplied options object (existing behavior).
        Set<OpenOption> overwriteOpenOptions = new HashSet<>();
        overwriteOpenOptions.add(StandardOpenOption.CREATE);
        overwriteOpenOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOpenOptions.add(StandardOpenOption.READ);
        overwriteOpenOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(overwriteOpenOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Translate the DFS-flavored parameters to blob equivalents and delegate to the blob client;
    // conversion of the parameters happens inside the wrapper so any failure is handled uniformly.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> blobResponse =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
    // Fail fast: the download lambda below dereferences options, so a null argument would
    // otherwise surface as an opaque NPE from deep inside the call.
    Objects.requireNonNull(options, "'options' cannot be null.");
    // Propagate the x-ms-upn preference (when set) as a request header via the pipeline context.
    context = BuilderHelper.addUpnHeader(options::isUpn, context);
    Context finalContext = context;
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
            new BlobDownloadToFileOptions(options.getFilePath())
                .setRange(Transforms.toBlobRange(options.getRange()))
                .setParallelTransferOptions(options.getParallelTransferOptions())
                .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
                .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
                .setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
                .setOpenOptions(options.getOpenOptions()), timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // Convenience overload: no source/destination request conditions, no timeout, default context.
    return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Delegate the rename to the async client, mapping the response value into a sync
    // DataLakeFileClient backed by a freshly built BlockBlobClient for the destination path.
    Mono<Response<DataLakeFileClient>> response =
        dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                    asyncResponse.getHeaders(),
                    new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                        new SpecializedBlobClientBuilder()
                            .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                            .buildBlockBlobClient())));
    // Block until the service call completes (bounded by timeout when one is supplied).
    Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    // NOTE(review): the value is wrapped in a second DataLakeFileClient here, presumably to
    // route through the path-client copy constructor — confirm the double wrap is intentional.
    return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Convenience overload: default FileQueryOptions for the expression, returning only the stream.
    return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Issue the query through the async client and block for its response envelope;
    // the payload itself is exposed lazily through the returned stream.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Synchronous convenience: default query options writing into the caller's stream,
    // with no timeout and an empty context.
    queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Convert the query options to blob form, run the query on the blob client, and map
    // the blob response back to the Data Lake response type.
    return DataLakeImplUtils.returnOrConvertException(
        () -> Transforms.toFileQueryResponse(
            blockBlobClient.queryWithResponse(Transforms.toBlobQueryOptions(queryOptions), timeout, context)),
        LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Fire the request with no timeout and an empty context; the response is discarded.
    this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Delegate to the async client and block, bounded by the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context), timeout);
}
} |
I think the comments I left in the other location about setting this could apply everywhere | public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
Context newContext;
options = options == null ? new ReadToFileOptions() : options;
if (options.isUpn() != null) {
HttpHeaders headers = new HttpHeaders();
headers.set("x-ms-upn", options.isUpn() ? "true" : "false");
if (context == null) {
newContext = new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
} else {
newContext = context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
}
} else {
newContext = null;
}
ReadToFileOptions finalOptions = options;
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
new BlobDownloadToFileOptions(finalOptions.getFilePath())
.setRange(Transforms.toBlobRange(finalOptions.getRange()))
.setParallelTransferOptions(finalOptions.getParallelTransferOptions())
.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(finalOptions.getDownloadRetryOptions()))
.setRequestConditions(Transforms.toBlobRequestConditions(finalOptions.getDataLakeRequestConditions()))
.setRetrieveContentRangeMd5(finalOptions.isRangeGetContentMd5())
.setOpenOptions(finalOptions.getOpenOptions()), timeout, newContext);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
} | options = options == null ? new ReadToFileOptions() : options; | public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
Context finalContext = context;
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
new BlobDownloadToFileOptions(options.getFilePath())
.setRange(Transforms.toBlobRange(options.getRange()))
.setParallelTransferOptions(options.getParallelTransferOptions())
.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
.setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
.setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
.setOpenOptions(options.getOpenOptions()), timeout, finalContext);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
} | class DataLakeFileClient extends DataLakePathClient {
/**
* Indicates the maximum number of bytes that can be sent in a call to upload.
*/
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
/**
 * Package-private constructor wiring together the async file client and the blob-side
 * client this synchronous client delegates to.
 */
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
    super(pathAsyncClient, blockBlobClient);
    this.dataLakeFileAsyncClient = pathAsyncClient;
}
/**
 * Internal copy constructor: adapts a generic {@link DataLakePathClient} into a
 * file-specific client by reusing its async path client and block blob client.
 */
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
    super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
    this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
    // A file's URL is identical to its path URL.
    return getPathUrl();
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
    // Alias for the generic object path, exposed under file-specific naming.
    return getObjectPath();
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
    // Alias for the generic object name, exposed under file-specific naming.
    return getObjectName();
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Rebuild both underlying clients with the new (possibly null) customer-provided key;
    // this client itself is left unchanged.
    return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete with no request conditions, no timeout, and an empty context. The previous
    // trailing .getValue() on the Response<Void> was a dead call and has been removed.
    deleteWithResponse(null, null, Context.NONE);
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the async path client and block, bounded by the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context), timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Default delete options; returns false (rather than throwing) when the file is absent.
    return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client, then block with the optional timeout bound.
    Mono<Response<Boolean>> response = dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
    // Convenience overload with overwrite=false: an existing file makes the upload fail.
    return upload(data, length, false);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
    // Convenience overload with overwrite=false: an existing file makes the upload fail.
    return upload(data, false);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the upload when the file already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data, length).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the upload when the file already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
    Context context) {
    // Include a message so a null argument produces an actionable NPE instead of a bare one.
    Objects.requireNonNull(options, "'options' cannot be null.");
    // Delegate to the async client, propagating the caller's context through Reactor.
    Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Log-and-rethrow so upload I/O failures are recorded consistently with SDK conventions.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
    // Delegate with overwrite disabled, matching the documented default behavior.
    uploadFromFile(filePath, false);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
    DataLakeRequestConditions conditions = null;
    if (!overwrite) {
        // A chunked upload is not atomic, so an existing destination must be rejected up front;
        // a single-shot upload relies on the ETag wildcard condition below instead.
        boolean chunked = UploadUtils.shouldUploadInChunks(filePath,
            ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER);
        if (chunked && exists()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
        }
        conditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    uploadFromFile(filePath, null, null, null, conditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout) {
    // Block on the async upload, honoring the optional timeout.
    try {
        StorageImplUtils.blockWithOptionalTimeout(
            this.dataLakeFileAsyncClient.uploadFromFile(filePath, parallelTransferOptions, headers, metadata,
                requestConditions),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Propagate the caller's context through the Reactor chain and block with the optional timeout.
    Mono<Response<PathInfo>> uploadMono = this.dataLakeFileAsyncClient
        .uploadFromFileWithResponse(filePath, parallelTransferOptions, headers, metadata, requestConditions)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(uploadMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
    // Delegate with no append options and no timeout; the response is discarded.
    appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
    // Delegate with no MD5 validation, no lease requirement and no timeout; the response is discarded.
    appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    byte[] contentMd5, String leaseId, Duration timeout, Context context) {
    // Adapt the legacy parameter list onto the options-based overload.
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    return appendWithResponse(data, fileOffset, length, options, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Convert the stream into replayable buffers (markable source required) so transient
    // failures can be retried, then block on the async append with the optional timeout.
    Flux<ByteBuffer> buffers = Utility.convertStreamToByteBuffer(data, length,
        BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(buffers, fileOffset, length, appendOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Adapt the legacy parameters onto append options, then block on the async append.
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    Mono<Response<Void>> appendMono = dataLakeFileAsyncClient.appendWithResponse(
        data.toFluxByteBuffer(), fileOffset, data.getLength(), options, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Block on the async append, honoring the optional timeout.
    Mono<Response<Void>> appendMono = dataLakeFileAsyncClient.appendWithResponse(
        data.toFluxByteBuffer(), fileOffset, data.getLength(), appendOptions, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Preserved for backwards compatibility; equivalent to flush(position, false).
    return flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // Mutate one request-conditions instance instead of allocating a second object that the
    // previous implementation immediately discarded on the non-overwrite path. When overwrite
    // is disallowed, the ETag wildcard "if-none-match" makes the service reject the flush if
    // data already exists at the destination.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Map the legacy parameter list onto the options-based overload.
    DataLakeFileFlushOptions options = new DataLakeFileFlushOptions()
        .setUncommittedDataRetained(retainUncommittedData)
        .setClose(close)
        .setPathHttpHeaders(httpHeaders)
        .setRequestConditions(requestConditions);
    return flushWithResponse(position, options, timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Block on the async flush, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context), timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
    // Read the whole file (no range, no retry options, no conditions, no MD5 validation).
    readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Delegate to the blob layer, translating both the argument types and any thrown
    // blob exceptions into their Data Lake equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> Transforms.toFileReadResponse(
        blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
            Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
            getRangeContentMd5, timeout, context)), LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
    // Delegate with default options (whole file, ETag locking).
    return openInputStream(null);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    // Delegate with an empty pipeline context.
    return openInputStream(options, Context.NONE);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
    options = options == null ? new DataLakeFileInputStreamOptions() : options;
    // When the UPN option is set, surface it as the "x-ms-upn" request header via the
    // context so AddHeadersFromContextPolicy attaches it to the outgoing request.
    // NOTE: the previous implementation replaced the caller-supplied context with null
    // whenever isUpn() was unset, silently dropping any context data the caller passed;
    // the context is now always forwarded unchanged in that case.
    if (options.isUpn() != null) {
        HttpHeaders headers = new HttpHeaders();
        headers.set("x-ms-upn", options.isUpn() ? "true" : "false");
        if (context == null) {
            context = new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
        } else {
            context = context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
        }
    }
    BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
    BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
    return new InternalDataLakeFileOpenInputStreamResult(inputStream,
        Transforms.toPathProperties(inputStream.getProperties()));
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
    // Delegate with default options.
    return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // Delegate with no pipeline context.
    return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Convert the Data Lake options into their blob-layer equivalent and delegate.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Delegate with overwrite disabled, matching the documented default behavior.
    return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Delegate with overwrite disabled, matching the documented default behavior.
    return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    Set<OpenOption> writeOptions = null;
    if (overwrite) {
        // CREATE + TRUNCATE_EXISTING lets an existing destination file be replaced instead of
        // failing with FileAlreadyExistsException (the default when no options are supplied).
        writeOptions = new HashSet<>();
        writeOptions.add(StandardOpenOption.CREATE);
        writeOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        writeOptions.add(StandardOpenOption.READ);
        writeOptions.add(StandardOpenOption.WRITE);
    }
    return readToFileWithResponse(filePath, null, null, null, null, false, writeOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
// When overwriting, build open options that create the destination if absent and
// truncate any existing content before writing.
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
// NOTE(review): this mutates the caller-supplied options instance, and throws NPE when
// options is null with overwrite=true — confirm whether a defensive copy is warranted.
options.setOpenOptions(openOptions);
}
return readToFileWithResponse(options, null, Context.NONE)
.getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Translate the Data Lake parameters to their blob-layer equivalents, download via the
    // underlying block blob client, then re-wrap the response with Data Lake path properties.
    // Storage exceptions are converted by the helper.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> blobResponse =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // Delegate to the full overload with no request conditions, no timeout, and no context.
    Response<DataLakeFileClient> renameResponse =
        renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null);
    return renameResponse.getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
Duration timeout, Context context) {
// Rename through the async client, then rebuild a synchronous DataLakeFileClient around the
// renamed path's async client plus a block blob client built for the new location.
Mono<Response<DataLakeFileClient>> response =
dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
sourceRequestConditions, destinationRequestConditions, context)
.map(asyncResponse ->
new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
asyncResponse.getHeaders(),
new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
new SpecializedBlobClientBuilder()
.blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
.buildBlockBlobClient())));
Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
// NOTE(review): resp.getValue() is already a DataLakeFileClient; wrapping it again in
// new DataLakeFileClient(...) looks redundant — confirm whether the extra copy is intentional.
return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default query options and return only the stream.
    FileQueryOptions defaultOptions = new FileQueryOptions(expression);
    return openQueryInputStreamWithResponse(defaultOptions).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Block on the async query; a null response means the reactive pipeline completed without
    // emitting a value, which is treated as an illegal state rather than a service error.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Run the query synchronously with default options, writing results to the given stream.
    FileQueryOptions queryOptions = new FileQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Delegate to the blob query endpoint, converting the options and response between the
    // Data Lake and blob object models; storage exceptions are translated by the helper.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobQueryResponse blobResponse = blockBlobClient.queryWithResponse(
            Transforms.toBlobQueryOptions(queryOptions), timeout, context);
        return Transforms.toFileQueryResponse(blobResponse);
    }, LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Fire-and-forget wrapper: no timeout, empty context, response discarded.
    scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Bridge to the async client and block, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context), timeout);
}
} | class DataLakeFileClient extends DataLakePathClient {
/**
* Indicates the maximum number of bytes that can be sent in a call to upload.
*/
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
// Logger used to surface client-side validation and I/O errors from this client.
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
// Async counterpart that backs most synchronous operations on this client.
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
/**
 * Creates a {@link DataLakeFileClient} backed by the given async client, using the supplied
 * {@link BlockBlobClient} for operations that go through the blob endpoint.
 *
 * @param pathAsyncClient the async file client that performs the underlying service calls.
 * @param blockBlobClient the blob client used for blob-endpoint operations.
 */
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
super(pathAsyncClient, blockBlobClient);
this.dataLakeFileAsyncClient = pathAsyncClient;
}
/**
 * Converts a generic {@link DataLakePathClient} into a file client by reusing its underlying
 * async path client and block blob client.
 *
 * @param dataLakePathClient the path client to wrap as a file client.
 */
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
    // A file's URL is its path URL on the service.
    final String pathUrl = getPathUrl();
    return pathUrl;
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
    // The file path is the generic object path of this resource.
    final String objectPath = getObjectPath();
    return objectPath;
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
    // The file name is the generic object name of this resource.
    final String objectName = getObjectName();
    return objectName;
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Re-key both the async Data Lake client and the underlying block blob client, then
    // combine them into a new synchronous file client.
    DataLakeFileAsyncClient keyedAsyncClient =
        dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    BlockBlobClient keyedBlobClient =
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey));
    return new DataLakeFileClient(keyedAsyncClient, keyedBlobClient);
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete unconditionally; the Void response carries nothing to propagate.
    Response<Void> deleteResponse = deleteWithResponse(null, null, Context.NONE);
    deleteResponse.getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Delegate to the shared path async client and block with the optional timeout.
    // NOTE(review): the null first argument presumably means "not recursive" (files have no
    // children) — confirm against DataLakePathAsyncClient.deleteWithResponse.
    Mono<Response<Void>> deletion = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context);
    return StorageImplUtils.blockWithOptionalTimeout(deletion, timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Use default delete options; the response value reports whether the file existed.
    DataLakePathDeleteOptions defaultOptions = new DataLakePathDeleteOptions();
    return deleteIfExistsWithResponse(defaultOptions, null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Block on the async variant, applying the optional timeout.
    Mono<Response<Boolean>> deletion = dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(deletion, timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
    // Non-destructive by default: fail if the file already exists.
    final boolean overwrite = false;
    return upload(data, length, overwrite);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
    // Non-destructive by default: fail if the file already exists.
    final boolean overwrite = false;
    return upload(data, overwrite);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    // When not overwriting, an If-None-Match: * condition makes the service reject the
    // upload if the file already exists.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data, length).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    // When not overwriting, an If-None-Match: * condition makes the service reject the
    // upload if the file already exists.
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
    Context context) {
    Objects.requireNonNull(options);
    // Propagate the caller's context into the reactive pipeline before blocking.
    Mono<Response<PathInfo>> upload = dataLakeFileAsyncClient.uploadWithResponse(options)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Re-log through the client logger so I/O failures are consistently reported.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
    // Non-destructive by default: fail if the destination file already exists.
    final boolean overwrite = false;
    uploadFromFile(filePath, overwrite);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
    DataLakeRequestConditions requestConditions = null;
    if (!overwrite) {
        // For uploads large enough to be chunked, check for an existing destination up front
        // and fail fast — presumably because the ETag precondition alone is insufficient for
        // multi-part uploads (TODO confirm).
        boolean chunked =
            UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER);
        if (chunked && exists()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
        }
        // Single-shot uploads rely on If-None-Match: * to reject overwrites service-side.
        requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout) {
    // Delegate to the async client and block with the optional timeout.
    Mono<Void> upload = dataLakeFileAsyncClient.uploadFromFile(filePath, parallelTransferOptions, headers,
        metadata, requestConditions);
    try {
        StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Re-log through the client logger so I/O failures are consistently reported.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Creates a file populated with the content of the specified local file.
 * <p>
 * To avoid overwriting an existing file, pass "*" to
 * {@link DataLakeRequestConditions#setIfNoneMatch(String)}.
 *
 * @param filePath Path of the local file to upload.
 * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing information about the uploaded path.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Propagate the caller's Context into the reactive chain, then block with an optional timeout.
    Mono<Response<PathInfo>> uploadMono = this.dataLakeFileAsyncClient
        .uploadFromFileWithResponse(filePath, parallelTransferOptions, headers, metadata, requestConditions)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(uploadMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param length The exact length of the data.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
    // Delegate to the response-returning overload and discard the response envelope.
    appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
    // No content MD5, lease id, or timeout; discard the response envelope.
    appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param length The exact length of the data.
 * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
 * received data and fail the request if it does not match the provided MD5.
 * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
 * the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    byte[] contentMd5, String leaseId, Duration timeout, Context context) {
    // Package the legacy parameters into the options bag and delegate to the options overload.
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    return appendWithResponse(data, fileOffset, length, options, timeout, context);
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param length The exact length of the data.
 * @param appendOptions {@link DataLakeFileAppendOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Adapt the blocking InputStream into a bounded Flux of ByteBuffers for the async client.
    Flux<ByteBuffer> bufferedData = Utility.convertStreamToByteBuffer(data, length,
        BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
    Mono<Response<Void>> appendMono = dataLakeFileAsyncClient.appendWithResponse(bufferedData, fileOffset, length,
        appendOptions, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
 * received data and fail the request if it does not match the provided MD5.
 * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
 * the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    // Delegate to the DataLakeFileAppendOptions overload (mirroring the InputStream-based
    // md5/lease overload) so the append logic lives in exactly one place.
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    return appendWithResponse(data, fileOffset, appendOptions, timeout, context);
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param appendOptions {@link DataLakeFileAppendOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Hand the payload to the async client as a Flux and block with the optional timeout.
    Flux<ByteBuffer> payload = data.toFluxByteBuffer();
    Mono<Response<Void>> appendMono = dataLakeFileAsyncClient.appendWithResponse(payload, fileOffset,
        data.getLength(), appendOptions, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Flushes (writes) data previously appended to the file through a call to append.
 * The previously uploaded data must be contiguous.
 * <p>By default this method will not overwrite existing data.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param position The length of the file after all data has been written.
 * @return Information about the created resource.
 * @deprecated Use {@link #flush(long, boolean)} to explicitly state the overwrite behavior.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Preserve the historical default of never overwriting existing data.
    return flush(position, false);
}
/**
 * Flushes (writes) data previously appended to the file through a call to append.
 * The previously uploaded data must be contiguous.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param position The length of the file after all data has been written.
 * @param overwrite Whether to overwrite, should data exist on the file.
 *
 * @return Information about the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // Build the conditions once; the previous implementation allocated a fresh
    // DataLakeRequestConditions and immediately discarded it on the non-overwrite path.
    DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // If-None-Match: * makes the service reject the flush when the file already has data.
        requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
 * Flushes (writes) data previously appended to the file through a call to append.
 * The previously uploaded data must be contiguous.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param position The length of the file after all data has been written.
 * @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
 * @param close Whether a file changed event raised indicates completion (true) or modification (false).
 * @param httpHeaders {@link PathHttpHeaders httpHeaders}
 * @param requestConditions {@link DataLakeRequestConditions requestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing the information of the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Fold the individual parameters into the options bag and delegate to the options overload.
    DataLakeFileFlushOptions options = new DataLakeFileFlushOptions()
        .setUncommittedDataRetained(retainUncommittedData)
        .setClose(close)
        .setPathHttpHeaders(httpHeaders)
        .setRequestConditions(requestConditions);
    return flushWithResponse(position, options, timeout, context);
}
/**
 * Flushes (writes) data previously appended to the file through a call to append.
 * The previously uploaded data must be contiguous.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/update">Azure
 * Docs</a></p>
 *
 * @param position The length of the file after all data has been written.
 * @param flushOptions {@link DataLakeFileFlushOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing the information of the created resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Block on the async flush, bounding the wait with the optional timeout.
    Mono<Response<PathInfo>> flushMono =
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context);
    return StorageImplUtils.blockWithOptionalTimeout(flushMono, timeout);
}
/**
 * Reads the entire file into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public void read(OutputStream stream) {
    // Full-file read: no range, retry options, conditions, MD5 validation, or timeout.
    readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
 * Reads a range of bytes from a file into an output stream.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param range {@link FileRange}
 * @param options {@link DownloadRetryOptions}
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null
 */
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Translate DataLake types to blob types, run the blob download, and map blob
    // exceptions/responses back into their DataLake equivalents.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadResponse blobResponse = blockBlobClient.downloadWithResponse(stream,
            Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options),
            Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context);
        return Transforms.toFileReadResponse(blobResponse);
    }, LOGGER);
}
/**
 * Opens a file input stream to download the file. Locks on ETags.
 *
 * @return An {@link InputStream} object that represents the stream to use for reading from the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public DataLakeFileOpenInputStreamResult openInputStream() {
    // Default options: whole file, ETag-locked.
    return openInputStream(null);
}
/**
 * Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
 * is not specified.
 *
 * @param options {@link DataLakeFileInputStreamOptions}
 * @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    // No caller-supplied pipeline context.
    return openInputStream(options, Context.NONE);
}
/**
 * Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
 * is not specified.
 *
 * @param options {@link DataLakeFileInputStreamOptions}
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
    // Attach the x-ms-upn header to the context when the UPN option is set.
    Context upnContext = BuilderHelper.addUpnHeader(() -> options == null ? null : options.isUpn(), context);
    BlobInputStream blobStream =
        blockBlobClient.openInputStream(Transforms.toBlobInputStreamOptions(options), upnContext);
    return new InternalDataLakeFileOpenInputStreamResult(blobStream,
        Transforms.toPathProperties(blobStream.getProperties()));
}
/**
 * Creates and opens an output stream to write data to the file. If the file already exists on the service, it
 * will be overwritten.
 *
 * @return The {@link OutputStream} that can be used to write to the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public OutputStream getOutputStream() {
    // Default options; existing content is overwritten.
    return getOutputStream(null);
}
/**
 * Creates and opens an output stream to write data to the file. If the file already exists on the service, it
 * will be overwritten.
 * <p>
 * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions#setIfNoneMatch(String)}.
 * </p>
 *
 * @param options {@link DataLakeFileOutputStreamOptions}
 * @return The {@link OutputStream} that can be used to write to the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // No caller-supplied pipeline context.
    return getOutputStream(options, null);
}
/**
 * Creates and opens an output stream to write data to the file. If the file already exists on the service, it
 * will be overwritten.
 * <p>
 * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions#setIfNoneMatch(String)}.
 * </p>
 *
 * @param options {@link DataLakeFileOutputStreamOptions}
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The {@link OutputStream} that can be used to write to the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Translate to blob output-stream options and open the stream on the underlying block blob.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Never overwrite an existing local file by default.
    return readToFile(filePath, false);
}
/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Never overwrite an existing local file by default.
    return readToFile(options, false);
}
/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param overwrite Whether to overwrite the file, should the file exist.
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    // A null option set keeps the default create-new semantics; a populated set permits
    // truncating an existing file in place.
    Set<OpenOption> overwriteOpenOptions = null;
    if (overwrite) {
        overwriteOpenOptions = new HashSet<>();
        overwriteOpenOptions.add(StandardOpenOption.CREATE);
        overwriteOpenOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOpenOptions.add(StandardOpenOption.READ);
        overwriteOpenOptions.add(StandardOpenOption.WRITE);
    }
    return readToFileWithResponse(filePath, null, null, null, null, false, overwriteOpenOptions, null, Context.NONE)
        .getValue();
}
/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}. NOTE(review): when {@code overwrite} is true this method mutates the
 * caller-supplied options object (its open options are replaced), and a null {@code options} appears to cause a
 * {@link NullPointerException} on that path — confirm whether null should be rejected explicitly.
 * @param overwrite Whether to overwrite the file, should the file exist.
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    // null open options keep the default create-new behavior (fails if the file exists).
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        // Allow truncating an existing local file in place.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
        // Overwrites any open options the caller had already placed on the options bag.
        options.setOpenOptions(openOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE)
        .getValue();
}
/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/read">Azure Docs</a></p>
 *
 * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
 * @param range {@link FileRange}
 * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
 * transfers parameter is ignored.
 * @param downloadRetryOptions {@link DownloadRetryOptions}
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
 * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Translate all DataLake parameters into blob download-to-file options, perform the blob
    // download, and map the blob response/exceptions back to DataLake types.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> blobResponse =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // Convenience overload: no access conditions, no timeout, no caller context.
    Response<DataLakeFileClient> response =
        renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null);
    return response.getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
Duration timeout, Context context) {
// Delegate the rename to the async client, rebuilding a synchronous DataLakeFileClient around the
// renamed path carried in the async response.
Mono<Response<DataLakeFileClient>> response =
dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
sourceRequestConditions, destinationRequestConditions, context)
.map(asyncResponse ->
new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
asyncResponse.getHeaders(),
new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
new SpecializedBlobClientBuilder()
.blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
.buildBlockBlobClient())));
// Block on the reactive pipeline, honoring the optional timeout.
Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
// NOTE(review): resp.getValue() is already a DataLakeFileClient built in the map() above, yet it is
// wrapped in another DataLakeFileClient here -- the second copy looks redundant; confirm intent.
return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Minimal overload: wrap the raw expression in default options and drop the response metadata.
    FileQueryOptions queryOptions = new FileQueryOptions(expression);
    return openQueryInputStreamWithResponse(queryOptions).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Execute the async query and block for its response; a null response indicates an internal error.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Adapt the streaming body into a blocking InputStream while keeping all response metadata.
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Delegate to the response-returning overload with default options; the response itself is discarded.
    FileQueryOptions queryOptions = new FileQueryOptions(expression, stream);
    queryWithResponse(queryOptions, null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Convert the options to the blob layer, run the query there, and translate the response
    // (and any blob-layer exception) back into Data Lake types.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobQueryResponse blobResponse = blockBlobClient.queryWithResponse(
            Transforms.toBlobQueryOptions(queryOptions), timeout, context);
        return Transforms.toFileQueryResponse(blobResponse);
    }, LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Delegate to the response-returning overload with no timeout; the response is discarded.
    scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Kick off the async operation, then block for completion with the optional timeout.
    Mono<Response<Void>> asyncResponse =
        this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
} |
Just a nit, this doesn't do anything useful. Feel free to pass `context` on to the `openInputStream` method. The only time you'd need to do this is if the `context` was used in a lambda as variables used from outside the scope of the lambda have to be either final or effectively final. ```java int value = 0; // This is fine as it isn't changed anywhere below String string = "hi"; string = string + " hi"; // string would have to be declared as the value for another variable as this is modified String stringForLambda = string; // This works as it isn't mutated again someCallTakingALambda(() -> { // Any variable used from outside of "{ ... }" must either be declared with "final" (final int finalInt) or not be mutated within the outer scope. }); ``` ``` | public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
Context finalContext = context;
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, finalContext);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Transforms.toPathProperties(inputStream.getProperties()));
} | Context finalContext = context; | public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Transforms.toPathProperties(inputStream.getProperties()));
} | class DataLakeFileClient extends DataLakePathClient {
/**
* Indicates the maximum number of bytes that can be sent in a call to upload.
*/
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
// Package-private constructor: pairs the async Data Lake file client (which performs the actual
// service operations) with the block blob client used for blob-layer delegation.
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
super(pathAsyncClient, blockBlobClient);
// Keep a typed reference so file-specific async operations don't require casts from the base class.
this.dataLakeFileAsyncClient = pathAsyncClient;
}
// Converting constructor: rebuilds a file client on top of an existing path client's underlying
// async path client and block blob client.
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
// A file's URL is identical to its path URL on the base class.
return getPathUrl();
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
// Delegates to the base class's object path (path without the resource name itself).
return getObjectPath();
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
// Delegates to the base class's object name (last path segment).
return getObjectName();
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Build key-scoped copies of both underlying clients, then pair them in a new file client.
    // A null key clears the customer-provided key on the copies.
    DataLakeFileAsyncClient keyedAsyncClient =
        dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey);
    BlockBlobClient keyedBlobClient = blockBlobClient
        .getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey));
    return new DataLakeFileClient(keyedAsyncClient, keyedBlobClient);
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/delete">Azure
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate to the response-returning overload with no conditions, no timeout and no context.
    Response<Void> response = deleteWithResponse(null, null, Context.NONE);
    response.getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // First argument to the async delete is left null -- presumably the recursive flag, which does
    // not apply to a single file; confirm against DataLakePathAsyncClient.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context), timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Use default delete options; the response value reports whether a file was actually deleted.
    Response<Boolean> response =
        deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE);
    return response.getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Launch the async operation first, then block on it with the optional timeout.
    Mono<Response<Boolean>> asyncResponse =
        dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
// Non-overwriting by default; delegates to the overload taking an explicit overwrite flag.
return upload(data, length, false);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
// Non-overwriting by default; delegates to the overload taking an explicit overwrite flag.
return upload(data, false);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    // Unless overwriting is allowed, require that no file exists by demanding the ETag not match "*".
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data, length).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    // Unless overwriting is allowed, require that no file exists by demanding the ETag not match "*".
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
Context context) {
Objects.requireNonNull(options);
// Propagate the caller's Context into the Reactor subscription so pipeline policies can observe it.
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
// Route I/O failures through the client logger before rethrowing.
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
// Non-overwriting by default; delegates to the overload taking an explicit overwrite flag.
uploadFromFile(filePath, false);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
DataLakeRequestConditions requestConditions = null;
if (!overwrite) {
// For files large enough to upload in chunks, proactively fail when the destination already
// exists. NOTE(review): presumably needed because the ETag precondition below is not sufficient
// for the chunked path; also note the exists() check races with concurrent creators -- confirm.
if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)
&& exists()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
}
// Require that no file exists at the destination (ETag must not match the wildcard).
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout) {
    // Delegate to the async client and block, routing I/O failures through the logger before rethrowing.
    Mono<Void> asyncUpload = this.dataLakeFileAsyncClient.uploadFromFile(
        filePath, parallelTransferOptions, headers, metadata, requestConditions);
    try {
        StorageImplUtils.blockWithOptionalTimeout(asyncUpload, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Build the async upload, threading the caller's Context through the Reactor subscription.
    Mono<Response<PathInfo>> uploadOperation = this.dataLakeFileAsyncClient
        .uploadFromFileWithResponse(filePath, parallelTransferOptions, headers, metadata, requestConditions)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        // Block synchronously, honoring the optional timeout.
        return StorageImplUtils.blockWithOptionalTimeout(uploadOperation, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
    // Convenience overload: no MD5 validation, no lease, default context.
    this.appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
    // Convenience overload: length inferred from the BinaryData; no MD5, no lease, default context.
    this.appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    byte[] contentMd5, String leaseId, Duration timeout, Context context) {
    // Fold the legacy parameters into an options bag and defer to the options-based overload.
    return appendWithResponse(data, fileOffset, length,
        new DataLakeFileAppendOptions().setLeaseId(leaseId).setContentHash(contentMd5).setFlush(null),
        timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Adapt the blocking stream into a Flux the async client can consume.
    Flux<ByteBuffer> bufferedData = Utility.convertStreamToByteBuffer(data, length,
        BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
    try {
        // Block on the async append, honoring the optional timeout.
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(bufferedData, fileOffset, length, appendOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    // Mirror the InputStream overload pair: fold the legacy parameters into an options bag and
    // delegate to the options-based overload rather than duplicating its null-check / flux
    // conversion / blocking logic here. The delegate performs the same Objects.requireNonNull(data).
    DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    return appendWithResponse(data, fileOffset, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Hand the BinaryData to the async client as a Flux, then block with the optional timeout.
    Mono<Response<Void>> appendOperation = dataLakeFileAsyncClient.appendWithResponse(
        data.toFluxByteBuffer(), fileOffset, data.getLength(), appendOptions, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendOperation, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link #flush(long, boolean)} instead.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Preserves the historical default of never overwriting existing data.
    return flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // When overwrite is disallowed, require that nothing exists at the destination (ETag wildcard
    // If-None-Match). Otherwise use unconditional request conditions.
    DataLakeRequestConditions requestConditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Repackage the individual parameters into the options bag and defer to the options-based overload.
    return flushWithResponse(position,
        new DataLakeFileFlushOptions()
            .setUncommittedDataRetained(retainUncommittedData)
            .setClose(close)
            .setPathHttpHeaders(httpHeaders)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Block on the async flush, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context), timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
    // Full-file read with defaults: whole range, no retry options, no conditions, no range MD5.
    readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Delegate to the underlying blob client, translating the options on the way in and both the
    // response and any thrown storage exception into their Data Lake equivalents on the way out.
    return DataLakeImplUtils.returnOrConvertException(
        () -> Transforms.toFileReadResponse(blockBlobClient.downloadWithResponse(stream,
            Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options),
            Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context)),
        LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
    // Default behavior: whole file, ETag locking.
    return openInputStream(null);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    return openInputStream(options, Context.NONE);
}

/**
 * Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
 * is not specified.
 *
 * @param options {@link DataLakeFileInputStreamOptions}
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
 * @throws DataLakeStorageException If a storage service error occurred.
 */
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
    // Restored overload: it is documented above and invoked by openInputStream(options) but was
    // missing, leaving that call unresolved. Propagate the caller's "x-ms-upn" preference (if any)
    // via the request context, matching the pattern used by readToFileWithResponse(ReadToFileOptions).
    context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
    BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
    BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
    return new InternalDataLakeFileOpenInputStreamResult(inputStream,
        Transforms.toPathProperties(inputStream.getProperties()));
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
    // Default options: overwrite any existing file content.
    return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // No additional pipeline context.
    return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Translate the Data Lake options to their block-blob equivalents and hand off to the blob client.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Default: fail with FileAlreadyExistsException rather than overwrite a local file.
    return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Default: fail with FileAlreadyExistsException rather than overwrite a local file.
    return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    // null openOptions means "create new, must not exist"; the overwrite set below instead
    // creates-or-truncates the destination file.
    Set<OpenOption> openOptions = null;
    if (overwrite) {
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        openOptions = overwriteOptions;
    }
    return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // Create-or-truncate the destination instead of the default "must not exist" behavior.
        Set<OpenOption> openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(openOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Assemble the blob-layer download options up front, then delegate to the blob client and
    // translate both the response and any thrown exception into their Data Lake equivalents.
    BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
        .setRange(Transforms.toBlobRange(range))
        .setParallelTransferOptions(parallelTransferOptions)
        .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
        .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
        .setRetrieveContentRangeMd5(rangeGetContentMd5)
        .setOpenOptions(openOptions);
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
    // Propagate the caller's "x-ms-upn" preference (if any) via the request context. Assigning the
    // result to a new final local avoids reassigning the 'context' parameter and therefore the
    // 'finalContext' alias dance; the former 'finalOptions' alias is dropped as well, since
    // 'options' is never reassigned and is already effectively final for the lambda below.
    Context finalContext = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
    return DataLakeImplUtils.returnOrConvertException(() -> {
        Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
            new BlobDownloadToFileOptions(options.getFilePath())
                .setRange(Transforms.toBlobRange(options.getRange()))
                .setParallelTransferOptions(options.getParallelTransferOptions())
                .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
                .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
                .setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
                .setOpenOptions(options.getOpenOptions()), timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
 * Moves the file to another location within the file system.
 *
 * @param destinationFileSystem The file system of the destination within the account.
 * {@code null} for the current file system.
 * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
 * For example, to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
 * in myfilesystem (ex: newdir/hi.txt), set destinationPath = "newdir/hi.txt".
 * @return A {@link DataLakeFileClient} used to interact with the new file created.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // Delegate to the full overload with no request conditions, timeout, or context.
    return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
 * Moves the file to another location within the file system, returning the full HTTP response.
 *
 * @param destinationFileSystem The file system of the destination within the account.
 * {@code null} for the current file system.
 * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
 * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
 * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A {@link Response} whose value is a {@link DataLakeFileClient} used to interact with the file created.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Perform the rename on the async client, then rebuild a sync-capable client around the renamed path.
    Mono<Response<DataLakeFileClient>> response =
        dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                    asyncResponse.getHeaders(),
                    new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                        new SpecializedBlobClientBuilder()
                            .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                            .buildBlockBlobClient())));
    Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    // Re-wrap so the returned client carries this client's sync plumbing.
    return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
 * Opens an input stream to query the file.
 *
 * @param expression The query expression.
 * @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
 */
public InputStream openQueryInputStream(String expression) {
    // Delegate to the options-based overload with default query options.
    return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
 * Opens an input stream to query the file, returning the full HTTP response.
 *
 * @param queryOptions {@link FileQueryOptions The query options}.
 * @return A response containing status code and HTTP headers including an <code>InputStream</code> object
 * that represents the stream to use for reading the query response.
 */
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Block on the async query; a null response indicates the pipeline produced no result.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Wrap the reactive body in a blocking InputStream for the sync caller.
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
 * Queries an entire file into an output stream.
 *
 * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
 * @param expression The query expression.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public void query(OutputStream stream, String expression) {
    // Delegate to the options-based overload with no timeout.
    queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
 * Queries an entire file into an output stream, returning the full HTTP response.
 *
 * @param queryOptions {@link FileQueryOptions The query options}.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 * @throws UncheckedIOException If an I/O error occurs.
 * @throws NullPointerException if {@code stream} is null.
 */
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Run the query through the blob client, mapping blob-flavored types/exceptions to Data Lake ones.
    return DataLakeImplUtils.returnOrConvertException(() ->
        Transforms.toFileQueryResponse(blockBlobClient.queryWithResponse(
            Transforms.toBlobQueryOptions(queryOptions), timeout, context)), LOGGER);
}
/**
 * Schedules the file for deletion.
 *
 * @param options Schedule deletion parameters.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Delegate to the response-returning overload, discarding the response.
    this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
 * Schedules the file for deletion, returning the full HTTP response.
 *
 * @param options Schedule deletion parameters.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Block on the async operation, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context), timeout);
}
} | class DataLakeFileClient extends DataLakePathClient {
/**
 * Indicates the maximum number of bytes that can be sent in a call to upload.
 */
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
// Logger used to surface client-side failures with SDK context.
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
// Async counterpart that backs every synchronous operation on this client.
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
/**
 * Package-private constructor for use by the builder: pairs the async path client with the
 * block-blob client that performs the underlying storage operations.
 *
 * @param pathAsyncClient the async client this sync client delegates to.
 * @param blockBlobClient the blob-layer client backing data operations.
 */
DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
    super(pathAsyncClient, blockBlobClient);
    this.dataLakeFileAsyncClient = pathAsyncClient;
}
/**
 * Converts a generic {@link DataLakePathClient} into a file client, reusing its underlying
 * async path client and block-blob client.
 *
 * @param dataLakePathClient the path client to wrap.
 */
private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
    super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
    this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
}
/**
 * Gets the URL of the file represented by this client on the Data Lake service.
 *
 * @return the URL.
 */
public String getFileUrl() {
    return getPathUrl();
}
/**
 * Gets the path of this file, not including the name of the resource itself.
 *
 * @return The path of the file.
 */
public String getFilePath() {
    return getObjectPath();
}
/**
 * Gets the name of this file, not including its full path.
 *
 * @return The name of the file.
 */
public String getFileName() {
    return getObjectName();
}
/**
 * Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
 *
 * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
 * pass {@code null} to use no customer provided key.
 * @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
 */
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
    // Rebuild both the async and blob-layer clients so the key applies at every layer.
    return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
        blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
 * Deletes a file.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate to the response-returning overload with no request conditions or timeout.
    deleteWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Deletes a file, returning the full HTTP response.
 *
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing status code and HTTP headers.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Block on the async delete (recursive flag not applicable to files, hence null).
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context), timeout);
}
/**
 * Deletes a file if it exists.
 *
 * @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Delegate with default delete options, no timeout, and an empty context.
    return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
 * Deletes a file if it exists, returning the full HTTP response.
 *
 * @param options {@link DataLakePathDeleteOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
 * was successfully deleted. If status code is 404, the file does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
    Context context) {
    // Issue the async delete-if-exists, then block with the optional timeout.
    Mono<Response<Boolean>> deleteMono = dataLakeFileAsyncClient.deleteIfExistsWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(deleteMono, timeout);
}
/**
 * Creates a new file. By default, this method will not overwrite an existing file.
 *
 * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
 * the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
 * support.
 * @param length The exact length of the data. It is important that this value match precisely the length of the
 * data provided in the {@link InputStream}.
 * @return Information about the uploaded path.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
    // Non-overwriting by default.
    return upload(data, length, false);
}
/**
 * Creates a new file. By default, this method will not overwrite an existing file.
 *
 * @param data The data to write to the blob.
 * @return Information about the uploaded path.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
    // Non-overwriting by default.
    return upload(data, false);
}
/**
 * Creates a new file, or updates the content of an existing file.
 *
 * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
 * the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
 * support.
 * @param length The exact length of the data. It is important that this value match precisely the length of the
 * data provided in the {@link InputStream}.
 * @param overwrite Whether to overwrite, should data exist on the file.
 * @return Information about the uploaded path.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // An ETag wildcard "if-none-match" makes the service reject the write when the file already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data, length).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
 * Creates a new file, or updates the content of an existing file.
 *
 * @param data The data to write to the blob.
 * @param overwrite Whether to overwrite, should data exist on the file.
 * @return Information about the uploaded path.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
    DataLakeRequestConditions conditions = new DataLakeRequestConditions();
    if (!overwrite) {
        // An ETag wildcard "if-none-match" makes the service reject the write when the file already exists.
        conditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    FileParallelUploadOptions uploadOptions =
        new FileParallelUploadOptions(data).setRequestConditions(conditions);
    return uploadWithResponse(uploadOptions, null, Context.NONE).getValue();
}
/**
 * Creates a new file, or updates the content of an existing file.
 * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions#setIfNoneMatch(String)}.
 *
 * @param options {@link FileParallelUploadOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Information about the uploaded path.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
    Context context) {
    Objects.requireNonNull(options);
    // Run the async upload with the caller's context propagated into the reactive chain.
    Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Re-log I/O failures so they carry SDK context before propagating.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Creates a file, with the content of the specified file. By default, this method will not overwrite an
 * existing file.
 *
 * @param filePath Path of the file to upload
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
    // Non-overwriting by default.
    uploadFromFile(filePath, false);
}
/**
 * Creates a file, with the content of the specified file.
 *
 * @param filePath Path of the file to upload
 * @param overwrite Whether to overwrite, should the file already exist
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
    DataLakeRequestConditions requestConditions = null;
    if (!overwrite) {
        // Chunked uploads cannot rely on a single conditional request, so check existence up front;
        // the size check short-circuits the network call for small (single-shot) uploads.
        if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)
            && exists()) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
        }
        // Single-shot uploads are protected service-side via an ETag wildcard condition.
        requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
 * Creates a file, with the content of the specified file.
 * <p>
 * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions#setIfNoneMatch(String)}.
 *
 * @param filePath Path of the file to upload
 * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout) {
    Mono<Void> upload = this.dataLakeFileAsyncClient.uploadFromFile(
        filePath, parallelTransferOptions, headers, metadata, requestConditions);
    try {
        StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Re-log I/O failures so they carry SDK context before propagating.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Creates a file, with the content of the specified file.
 * <p>
 * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions#setIfNoneMatch(String)}.
 *
 * @param filePath Path of the file to upload
 * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing information about the uploaded path.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
    PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Run the async upload with the caller's context propagated into the reactive chain.
    Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadFromFileWithResponse(
        filePath, parallelTransferOptions, headers, metadata, requestConditions)
        .contextWrite(FluxUtil.toReactorContext(context));
    try {
        return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
    } catch (UncheckedIOException e) {
        // Re-log I/O failures so they carry SDK context before propagating.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param length The exact length of the data.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(InputStream data, long fileOffset, long length) {
    // Delegate with no MD5, no lease, and no timeout.
    appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void append(BinaryData data, long fileOffset) {
    // Delegate with no MD5, no lease, and no timeout.
    appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
}
/**
 * Appends data to the specified resource to later be flushed (written) by a call to flush.
 *
 * @param data The data to write to the file.
 * @param fileOffset The position where the data is to be appended.
 * @param length The exact length of the data.
 * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
 * received data and fail the request if it does not match the provided MD5.
 * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
 * the file.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 *
 * @return A response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    byte[] contentMd5, String leaseId, Duration timeout, Context context) {
    // Bundle the legacy parameters into the options type and delegate to the options-based overload.
    return appendWithResponse(data, fileOffset, length,
        new DataLakeFileAppendOptions().setContentHash(contentMd5).setLeaseId(leaseId).setFlush(null),
        timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Chunk the stream into ByteBuffers sized for block uploads, then hand the work to the
    // async client and block (optionally bounded by 'timeout') for the result.
    Flux<ByteBuffer> buffers = Utility.convertStreamToByteBuffer(data, length,
        BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(buffers, fileOffset, length, appendOptions, context),
            timeout);
    } catch (UncheckedIOException e) {
        // Surface stream-read failures through the client logger, preserving the cause.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Build the options bag from the individual parameters; 'flush' is intentionally unset.
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions().setFlush(null)
        .setContentHash(contentMd5)
        .setLeaseId(leaseId);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(data.toFluxByteBuffer(), fileOffset, data.getLength(),
                options, context), timeout);
    } catch (UncheckedIOException e) {
        // Surface stream-read failures through the client logger, preserving the cause.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Delegate to the async client and block for the outcome, honoring the optional timeout.
    try {
        return StorageImplUtils.blockWithOptionalTimeout(
            dataLakeFileAsyncClient.appendWithResponse(data.toFluxByteBuffer(), fileOffset, data.getLength(),
                appendOptions, context), timeout);
    } catch (UncheckedIOException e) {
        // Surface stream-read failures through the client logger, preserving the cause.
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Legacy entry point: delegate with overwrite disabled, matching historical behavior.
    return flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // When overwrite is disallowed, require that no committed data exists by sending an
    // If-None-Match: * condition; otherwise use unconditional request conditions.
    DataLakeRequestConditions conditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    return flushWithResponse(position, false, false, null, conditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Fold the individual parameters into the options bag and defer to the options overload.
    DataLakeFileFlushOptions options = new DataLakeFileFlushOptions()
        .setRequestConditions(requestConditions)
        .setPathHttpHeaders(httpHeaders)
        .setClose(close)
        .setUncommittedDataRetained(retainUncommittedData);
    return flushWithResponse(position, options, timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Delegate to the async client and block for the result, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context), timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
    // Download the whole file (no range, default options) into the caller-supplied stream.
    readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Translate DataLake types to their blob equivalents, run the blob-layer download, and
    // map the response (and any blob-layer exception) back into DataLake types.
    return DataLakeImplUtils.returnOrConvertException(() -> Transforms.toFileReadResponse(
        blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
            Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
            getRangeContentMd5, timeout, context)), LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
    // No options: full-file read with the default (ETag-locking) behavior.
    return openInputStream(null);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    // Delegate with an empty pipeline context.
    return openInputStream(options, Context.NONE);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
    // No options: overwrite the file if it already exists on the service.
    return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // Delegate with no additional pipeline context.
    return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Convert to the blob options type and open the underlying block blob output stream.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Default: fail with FileAlreadyExistsException if the destination file exists.
    return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Default: fail with FileAlreadyExistsException if the destination file exists.
    return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    Set<OpenOption> openOptions;
    if (overwrite) {
        // Allow replacing an existing file: create it if absent, truncate it otherwise.
        openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
    } else {
        // A null option set means "create new only"; an existing file fails downstream.
        openOptions = null;
    }
    return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // Allow replacing an existing file: create it if absent, truncate it otherwise.
        Set<OpenOption> openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(openOptions);
    }
    // When overwrite is false the caller-provided open options (possibly null) are kept,
    // so an existing destination file fails with FileAlreadyExistsException downstream.
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Run the blob-layer download-to-file, converting any blob exception into its DataLake
    // equivalent and mapping the blob properties back onto path properties.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> response =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
    // Propagate the caller's x-ms-upn preference (if any) through the pipeline context.
    Context finalContext = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
    // Run the blob-layer download-to-file, converting any blob exception into its DataLake
    // equivalent and mapping the blob properties back onto path properties.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(options.getFilePath())
            .setRange(Transforms.toBlobRange(options.getRange()))
            .setParallelTransferOptions(options.getParallelTransferOptions())
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
            .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
            .setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
            .setOpenOptions(options.getOpenOptions());
        Response<BlobProperties> response =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, finalContext);
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // No request conditions, timeout, or context: plain rename within the account.
    return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Delegate the rename to the async client. Each async response is mapped into a
    // synchronous response whose value is a DataLakeFileClient bound to the renamed path,
    // built from the returned async client plus a block blob client for the new location.
    Mono<Response<DataLakeFileClient>> response =
        dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                    asyncResponse.getHeaders(),
                    new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                        new SpecializedBlobClientBuilder()
                            .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                            .buildBlockBlobClient())));
    // Block (optionally bounded by 'timeout') and re-wrap the value via the
    // DataLakeFileClient copy constructor before returning.
    // NOTE(review): the value is wrapped twice — once in the map above and once here —
    // presumably intentional (copy-constructor normalization); confirm before changing.
    Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Wrap the expression in default query options and unwrap the response value.
    return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
FileQueryAsyncResponse response = dataLakeFileAsyncClient.queryWithResponse(queryOptions)
.block();
if (response == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
}
return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
BlobQueryResponse response = blockBlobClient.queryWithResponse(
Transforms.toBlobQueryOptions(queryOptions), timeout, context);
return Transforms.toFileQueryResponse(response);
}, LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
Duration timeout, Context context) {
Mono<Response<Void>> response = this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
} |
In this case `Context finalContext = context` is needed as the method modifies context, meaning it's no longer effectively final. But `finalOptions` isn't needed as nothing is updating it, meaning it is effectively final. | public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
Context finalContext = context;
ReadToFileOptions finalOptions = options;
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
new BlobDownloadToFileOptions(finalOptions.getFilePath())
.setRange(Transforms.toBlobRange(finalOptions.getRange()))
.setParallelTransferOptions(finalOptions.getParallelTransferOptions())
.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(finalOptions.getDownloadRetryOptions()))
.setRequestConditions(Transforms.toBlobRequestConditions(finalOptions.getDataLakeRequestConditions()))
.setRetrieveContentRangeMd5(finalOptions.isRangeGetContentMd5())
.setOpenOptions(finalOptions.getOpenOptions()), timeout, finalContext);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
} | ReadToFileOptions finalOptions = options; | public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
Context finalContext = context;
return DataLakeImplUtils.returnOrConvertException(() -> {
Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(
new BlobDownloadToFileOptions(options.getFilePath())
.setRange(Transforms.toBlobRange(options.getRange()))
.setParallelTransferOptions(options.getParallelTransferOptions())
.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
.setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
.setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
.setOpenOptions(options.getOpenOptions()), timeout, finalContext);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
} | class DataLakeFileClient extends DataLakePathClient {
/**
* Indicates the maximum number of bytes that can be sent in a call to upload.
*/
private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
    // Package-private constructor: pairs the async DataLake client with the
    // underlying block blob client that executes the actual service calls.
    DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
        super(pathAsyncClient, blockBlobClient);
        this.dataLakeFileAsyncClient = pathAsyncClient;
    }
    // Converting constructor: rewraps a generic path client as a file client,
    // building a fresh DataLakeFileAsyncClient over the same async path client.
    private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
        super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
        this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
    }
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
    public String getFileUrl() {
        // A file's URL is simply its path URL on the Data Lake endpoint.
        return getPathUrl();
    }
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
    public String getFilePath() {
        // Delegates to the generic path accessor inherited from DataLakePathClient.
        return getObjectPath();
    }
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
    public String getFileName() {
        // Delegates to the generic object-name accessor (last path segment).
        return getObjectName();
    }
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
Context context) {
return StorageImplUtils.blockWithOptionalTimeout(dataLakeFileAsyncClient
.deleteIfExistsWithResponse(options, context), timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
    // Convenience overload: uploads without overwrite; fails if the file exists.
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathInfo upload(InputStream data, long length) {
        // overwrite = false: the three-arg overload adds the ETag wildcard guard.
        return upload(data, length, false);
    }
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
    // Convenience overload: uploads without overwrite; fails if the file exists.
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PathInfo upload(BinaryData data) {
        // overwrite = false: the two-arg overload adds the ETag wildcard guard.
        return upload(data, false);
    }
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return uploadWithResponse(new FileParallelUploadOptions(data, length).setRequestConditions(requestConditions),
null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return uploadWithResponse(new FileParallelUploadOptions(data).setRequestConditions(requestConditions),
null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
Context context) {
Objects.requireNonNull(options);
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
    // Convenience overload: uploads the local file without overwriting.
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void uploadFromFile(String filePath) {
        // overwrite = false: the two-arg overload enforces the no-overwrite guard.
        uploadFromFile(filePath, false);
    }
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, boolean overwrite) {
DataLakeRequestConditions requestConditions = null;
if (!overwrite) {
if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)
&& exists()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
}
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
uploadFromFile(filePath, null, null, null, requestConditions, null);
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout) {
Mono<Void> upload = this.dataLakeFileAsyncClient.uploadFromFile(
filePath, parallelTransferOptions, headers, metadata, requestConditions);
try {
StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadFromFileWithResponse(
filePath, parallelTransferOptions, headers, metadata, requestConditions)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
    // Convenience overload: appends with no MD5 check, no lease, no timeout.
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void append(InputStream data, long fileOffset, long length) {
        appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
    }
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
    // Convenience overload: appends with no MD5 check, no lease, no timeout.
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void append(BinaryData data, long fileOffset) {
        appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
    }
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
byte[] contentMd5, String leaseId, Duration timeout, Context context) {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
return appendWithResponse(data, fileOffset, length, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length,
BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fbb, fileOffset, length,
appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
    Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Translate the individual parameters into the options bag the async client expects.
    DataLakeFileAppendOptions options = new DataLakeFileAppendOptions()
        .setLeaseId(leaseId)
        .setContentHash(contentMd5)
        .setFlush(null);
    Mono<Response<Void>> appendMono = dataLakeFileAsyncClient.appendWithResponse(data.toFluxByteBuffer(),
        fileOffset, data.getLength(), options, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
    DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
    Objects.requireNonNull(data);
    // Delegate to the async client and block, honoring the optional timeout.
    Mono<Response<Void>> appendMono = dataLakeFileAsyncClient.appendWithResponse(data.toFluxByteBuffer(),
        fileOffset, data.getLength(), appendOptions, context);
    try {
        return StorageImplUtils.blockWithOptionalTimeout(appendMono, timeout);
    } catch (UncheckedIOException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
    // Historical default: do not overwrite existing data.
    return this.flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
    // Without overwrite, an ETag wildcard If-None-Match makes the service reject the flush
    // when the destination already exists.
    DataLakeRequestConditions requestConditions = overwrite
        ? new DataLakeRequestConditions()
        : new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Fold the individual parameters into the options bag and delegate to the options-based overload.
    return flushWithResponse(position,
        new DataLakeFileFlushOptions()
            .setUncommittedDataRetained(retainUncommittedData)
            .setClose(close)
            .setPathHttpHeaders(httpHeaders)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
    Context context) {
    // Block on the async flush, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context), timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
    // Full-file read: no range, no retry options, no request conditions, no MD5 validation.
    readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Delegate to the underlying blob client, translating DataLake option types to their blob
    // counterparts and converting any blob-layer exception back via returnOrConvertException.
    return DataLakeImplUtils.returnOrConvertException(() -> Transforms.toFileReadResponse(
        blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
            Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
            getRangeContentMd5, timeout, context)), LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
    // No options: defaults apply (ETag locking per the javadoc above).
    return openInputStream(null);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
    // Delegate with an empty context.
    return openInputStream(options, Context.NONE);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
    // Propagate the caller's x-ms-upn preference (if any) into the request context. The
    // `finalContext` copy the previous revision kept was dead weight: no lambda below captures
    // the local, so the reassigned parameter can be used directly.
    context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
    BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
    BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
    // Wrap the blob stream together with its properties translated to DataLake path properties.
    return new InternalDataLakeFileOpenInputStreamResult(inputStream,
        Transforms.toPathProperties(inputStream.getProperties()));
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
    // No options: existing file content is overwritten.
    return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
    // Delegate with no additional context.
    return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
    // Translate the DataLake options to block-blob options and open the stream on the blob client.
    return blockBlobClient.getBlobOutputStream(Transforms.toBlockBlobOutputStreamOptions(options), context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
    // Default: fail if the destination file already exists.
    return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Default: fail if the destination file already exists.
    return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    // A null option set keeps the default behavior: the destination must not exist.
    Set<OpenOption> overwriteOptions = null;
    if (overwrite) {
        // Create-or-truncate semantics so an existing destination file is replaced.
        overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
    }
    return readToFileWithResponse(filePath, null, null, null, null, false, overwriteOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // Create-or-truncate semantics so an existing destination file is replaced; the set is
        // installed on the caller-supplied options, matching the previous behavior.
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(overwriteOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    return DataLakeImplUtils.returnOrConvertException(() -> {
        // Build the blob-layer download options from the DataLake parameters.
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> blobResponse =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        // Re-wrap the blob properties as DataLake path properties, keeping the raw response data.
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
    // No request conditions, no timeout, no context.
    return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
    DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
    Duration timeout, Context context) {
    // Perform the rename through the async client, then rebuild a synchronous client over the
    // renamed path: the async response's value is wrapped in a new DataLakeFileAsyncClient, and a
    // fresh BlockBlobClient is built from its underlying block-blob async client.
    Mono<Response<DataLakeFileClient>> response =
        dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)
            .map(asyncResponse ->
                new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
                    asyncResponse.getHeaders(),
                    new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
                        new SpecializedBlobClientBuilder()
                            .blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
                            .buildBlockBlobClient())));
    // Block with the optional timeout, then re-wrap once more via the copy constructor.
    // NOTE(review): the extra DataLakeFileClient copy appears intentional — confirm before simplifying.
    Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
    // Build the default query options around the expression and unwrap the response value.
    return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Blocking call into the async query pipeline.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    // Expose the streamed query result as an InputStream while preserving the response metadata.
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), new FluxInputStream(asyncResponse.getValue()),
        asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
    // Default options, no timeout, empty context.
    queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
return DataLakeImplUtils.returnOrConvertException(() -> {
BlobQueryResponse response = blockBlobClient.queryWithResponse(
Transforms.toBlobQueryOptions(queryOptions), timeout, context);
return Transforms.toFileQueryResponse(response);
}, LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
Duration timeout, Context context) {
Mono<Response<Void>> response = this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
} | class DataLakeFileClient extends DataLakePathClient {
    /**
     * Indicates the maximum number of bytes that can be sent in a call to upload.
     */
    private static final long MAX_APPEND_FILE_BYTES = DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES;
    // Client-wide logger; also used to wrap and rethrow UncheckedIOExceptions raised by blocking calls.
    private static final ClientLogger LOGGER = new ClientLogger(DataLakeFileClient.class);
    // Async counterpart that performs the actual service interaction; the sync methods block on it.
    private final DataLakeFileAsyncClient dataLakeFileAsyncClient;
    /**
     * Creates a file client wrapping the given async client and the blob-endpoint client that
     * addresses the same resource.
     *
     * @param pathAsyncClient the async Data Lake file client most operations delegate to.
     * @param blockBlobClient the blob-endpoint client used for operations served by the blob API.
     */
    DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
        super(pathAsyncClient, blockBlobClient);
        this.dataLakeFileAsyncClient = pathAsyncClient;
    }
    /**
     * Converts a generic path client into a file client over the same resource.
     *
     * @param dataLakePathClient the path client to convert.
     */
    private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
        super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
        this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
    }
/**
* Gets the URL of the file represented by this client on the Data Lake service.
*
* @return the URL.
*/
public String getFileUrl() {
return getPathUrl();
}
/**
* Gets the path of this file, not including the name of the resource itself.
*
* @return The path of the file.
*/
public String getFilePath() {
return getObjectPath();
}
/**
* Gets the name of this file, not including its full path.
*
* @return The name of the file.
*/
public String getFileName() {
return getObjectName();
}
/**
* Creates a new {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link DataLakeFileClient} with the specified {@code customerProvidedKey}.
*/
public DataLakeFileClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new DataLakeFileClient(dataLakeFileAsyncClient.getCustomerProvidedKeyAsyncClient(customerProvidedKey),
blockBlobClient.getCustomerProvidedKeyClient(Transforms.toBlobCustomerProvidedKey(customerProvidedKey)));
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.delete -->
* <pre>
* client.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.delete -->
*
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/datalakestoragegen2/path/delete">Azure
 * Docs</a></p>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes a file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* client.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExists -->
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
* @return {@code true} if file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(new DataLakePathDeleteOptions(), null, Context.NONE).getValue();
}
/**
* Deletes a file if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
* <pre>
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakePathDeleteOptions options = new DataLakePathDeleteOptions&
* .setRequestConditions&
*
* Response<Boolean> response = client.deleteIfExistsWithResponse&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.deleteIfExistsWithResponse
*
* <p>For more information see the
* <a href="https:
* Docs</a></p>
*
* @param options {@link DataLakePathDeleteOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 200, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DataLakePathDeleteOptions options, Duration timeout,
Context context) {
return StorageImplUtils.blockWithOptionalTimeout(dataLakeFileAsyncClient
.deleteIfExistsWithResponse(options, context), timeout);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length) {
return upload(data, length, false);
}
/**
* Creates a new file. By default, this method will not overwrite an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data) {
return upload(data, false);
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param length The exact length of the data. It is important that this value match precisely the length of the
* data provided in the {@link InputStream}.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(InputStream data, long length, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return uploadWithResponse(new FileParallelUploadOptions(data, length).setRequestConditions(requestConditions),
null, Context.NONE).getValue();
}
/**
* Creates a new file, or updates the content of an existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.upload
* <pre>
* try &
* boolean overwrite = false;
* client.upload&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.upload
*
* @param data The data to write to the blob. The data must be markable. This is in order to support retries. If
* the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark
* support.
* @param overwrite Whether to overwrite, should data exist on the file.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo upload(BinaryData data, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return uploadWithResponse(new FileParallelUploadOptions(data).setRequestConditions(requestConditions),
null, Context.NONE).getValue();
}
/**
* Creates a new file.
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadWithResponse&
* .setParallelTransferOptions&
* .setMetadata&
* .setPermissions&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse
*
* @param options {@link FileParallelUploadOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the uploaded path.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadWithResponse(FileParallelUploadOptions options, Duration timeout,
Context context) {
Objects.requireNonNull(options);
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadWithResponse(options)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file. By default, this method will not overwrite an
* existing file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath) {
uploadFromFile(filePath, false);
}
/**
* Creates a file, with the content of the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* try &
* boolean overwrite = false;
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param overwrite Whether to overwrite, should the file already exist
* @throws UncheckedIOException If an I/O error occurs
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void uploadFromFile(String filePath, boolean overwrite) {
        DataLakeRequestConditions requestConditions = null;
        if (!overwrite) {
            // For files large enough to be uploaded in chunks, probe existence up front so the
            // caller fails fast before any data is transferred. NOTE(review): this check is
            // inherently racy (the file could appear between here and the upload); the ETag
            // condition below is what actually enforces no-overwrite on the service side.
            if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.FILE_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)
                && exists()) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
            }
            // ETag wildcard "if-none-match": the service rejects the upload if the file exists.
            requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        uploadFromFile(filePath, null, null, null, requestConditions, null);
    }
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* client.uploadFromFile&
* System.out.println&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout) {
Mono<Void> upload = this.dataLakeFileAsyncClient.uploadFromFile(
filePath, parallelTransferOptions, headers, metadata, requestConditions);
try {
StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Creates a file, with the content of the specified file.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
* <pre>
* PathHttpHeaders headers = new PathHttpHeaders&
* .setContentMd5&
* .setContentLanguage&
* .setContentType&
*
* Map<String, String> metadata = Collections.singletonMap&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
* Long blockSize = 100L * 1024L * 1024L; &
* ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
*
* try &
* Response<PathInfo> response = client.uploadFromFileWithResponse&
* metadata, requestConditions, timeout, new Context&
* System.out.printf&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFileWithResponse
*
* @param filePath Path of the file to upload
* @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
* @param headers {@link PathHttpHeaders}
* @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link DataLakeRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing information about the uploaded path.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> uploadFromFileWithResponse(String filePath, ParallelTransferOptions parallelTransferOptions,
PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<PathInfo>> upload = this.dataLakeFileAsyncClient.uploadFromFileWithResponse(
filePath, parallelTransferOptions, headers, metadata, requestConditions)
.contextWrite(FluxUtil.toReactorContext(context));
try {
return StorageImplUtils.blockWithOptionalTimeout(upload, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void append(InputStream data, long fileOffset, long length) {
        // Plain append: no options, no timeout. The nulls select the overload taking
        // (InputStream, long, long, DataLakeFileAppendOptions, Duration, Context) — do not
        // reorder or cast these arguments, overload resolution depends on them.
        appendWithResponse(data, fileOffset, length, null, null, Context.NONE);
    }
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.append
* <pre>
* client.append&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.append
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void append(BinaryData data, long fileOffset) {
        // Plain append: no MD5 check, no lease, no timeout. The null arguments participate in
        // overload resolution among the appendWithResponse variants — do not reorder or cast them.
        appendWithResponse(data, fileOffset, null, null, null, Context.NONE);
    }
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
byte[] contentMd5, String leaseId, Duration timeout, Context context) {
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
return appendWithResponse(data, fileOffset, length, appendOptions, timeout, context);
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param length The exact length of the data.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length,
BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fbb, fileOffset, length,
appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
*
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the
* received data and fail the request if it does not match the provided MD5.
* @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on
* the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset, byte[] contentMd5, String leaseId,
Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fluxData = data.toFluxByteBuffer();
DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions()
.setLeaseId(leaseId)
.setContentHash(contentMd5)
.setFlush(null);
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fluxData, fileOffset,
data.getLength(), appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Appends data to the specified resource to later be flushed (written) by a call to flush
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
* <pre>
* BinaryData binaryData = BinaryData.fromStream&
* FileRange range = new FileRange&
* byte[] contentMd5 = new byte[0]; &
* DataLakeFileAppendOptions appendOptions = new DataLakeFileAppendOptions&
* .setLeaseId&
* .setContentHash&
* .setFlush&
* Response<Void> response = client.appendWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param data The data to write to the file.
* @param fileOffset The position where the data is to be appended.
* @param appendOptions {@link DataLakeFileAppendOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> appendWithResponse(BinaryData data, long fileOffset,
DataLakeFileAppendOptions appendOptions, Duration timeout, Context context) {
Objects.requireNonNull(data);
Flux<ByteBuffer> fluxData = data.toFluxByteBuffer();
Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(fluxData, fileOffset,
data.getLength(), appendOptions, context);
try {
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
} catch (UncheckedIOException e) {
throw LOGGER.logExceptionAsError(e);
}
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
* <p>By default this method will not overwrite existing data.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @return Information about the created resource.
* @deprecated See {@link
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public PathInfo flush(long position) {
return flush(position, false);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flush
* <pre>
* boolean overwrite = true;
* client.flush&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flush
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param overwrite Whether to overwrite, should data exist on the file.
*
* @return Information about the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathInfo flush(long position, boolean overwrite) {
DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
if (!overwrite) {
requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
}
return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* requestConditions, timeout, new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param retainUncommittedData Whether uncommitted data is to be retained after the operation.
* @param close Whether a file changed event raised indicates completion (true) or modification (false).
* @param httpHeaders {@link PathHttpHeaders httpHeaders}
* @param requestConditions {@link DataLakeRequestConditions requestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
    PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
    // Fold the individual parameters into the options bag and delegate to the options-based overload.
    return flushWithResponse(position,
        new DataLakeFileFlushOptions()
            .setUncommittedDataRetained(retainUncommittedData)
            .setClose(close)
            .setPathHttpHeaders(httpHeaders)
            .setRequestConditions(requestConditions),
        timeout, context);
}
/**
* Flushes (writes) data previously appended to the file through a call to append.
* The previously uploaded data must be contiguous.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
* byte[] contentMd5 = new byte[0]; &
* boolean retainUncommittedData = false;
* boolean close = false;
* PathHttpHeaders httpHeaders = new PathHttpHeaders&
* .setContentLanguage&
* .setContentType&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
*
* Integer leaseDuration = 15;
*
* DataLakeFileFlushOptions flushOptions = new DataLakeFileFlushOptions&
* .setUncommittedDataRetained&
* .setClose&
* .setPathHttpHeaders&
* .setRequestConditions&
* .setLeaseAction&
* .setLeaseDuration&
* .setProposedLeaseId&
*
* Response<PathInfo> response = client.flushWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a></p>
*
* @param position The length of the file after all data has been written.
* @param flushOptions {@link DataLakeFileFlushOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing the information of the created resource.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathInfo> flushWithResponse(long position, DataLakeFileFlushOptions flushOptions, Duration timeout,
Context context) {
// Delegate to the async client and block; blockWithOptionalTimeout raises a RuntimeException
// if the optional timeout elapses before the call completes.
Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, flushOptions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Reads the entire file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.read
* <pre>
* client.read&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.read
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public void read(OutputStream stream) {
// Full-file read: no range, no retry options, no request conditions, no range MD5 validation.
readWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Reads a range of bytes from a file into an output stream.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
* <pre>
* FileRange range = new FileRange&
* DownloadRetryOptions options = new DownloadRetryOptions&
*
* System.out.printf&
* client.readWithResponse&
* timeout, new Context&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link FileRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
    DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
    // Map the DataLake arguments onto the underlying blob client; returnOrConvertException
    // translates any blob-layer exception into its DataLake equivalent.
    return DataLakeImplUtils.returnOrConvertException(() -> Transforms.toFileReadResponse(
        blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
            Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
            getRangeContentMd5, timeout, context)), LOGGER);
}
/**
* Opens a file input stream to download the file. Locks on ETags.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
* <pre>
* DataLakeFileOpenInputStreamResult inputStream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream -->
*
* @return An {@link InputStream} object that represents the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream() {
// Equivalent to openInputStream(null): default options, ETag-locked, full-file stream.
return openInputStream(null);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* DataLakeFileInputStreamOptions options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult streamResult = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options) {
// Delegates with Context.NONE; the (options, context) overload performs the actual work.
return openInputStream(options, Context.NONE);
}
/**
* Opens a file input stream to download the specified range of the file. Defaults to ETag locking if the option
* is not specified.
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
* <pre>
* options = new DataLakeFileInputStreamOptions&
* .setRequestConditions&
* DataLakeFileOpenInputStreamResult stream = client.openInputStream&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openInputStream
*
* @param options {@link DataLakeFileInputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link DataLakeFileOpenInputStreamResult} object that contains the stream to use for reading from the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
    // Stage the x-ms-upn header on the context when requested via options, then open the stream
    // on the underlying block blob and surface its properties as path properties.
    Context upnContext = BuilderHelper.addUpnHeader(() -> options == null ? null : options.isUpn(), context);
    BlobInputStream blobStream = blockBlobClient.openInputStream(Transforms.toBlobInputStreamOptions(options),
        upnContext);
    return new InternalDataLakeFileOpenInputStreamResult(blobStream,
        Transforms.toPathProperties(blobStream.getProperties()));
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
*
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream() {
// Default options: any existing file is overwritten.
return getOutputStream(null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options) {
// Delegates with a null context; the (options, context) overload performs the actual work.
return getOutputStream(options, null);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it
* will be overwritten.
* <p>
* To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
* </p>
*
* @param options {@link DataLakeFileOutputStreamOptions}
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link OutputStream} that can be used to write to the file.
* @throws DataLakeStorageException If a storage service error occurred.
*/
public OutputStream getOutputStream(DataLakeFileOutputStreamOptions options, Context context) {
// Translate the DataLake options into the blob model and open the stream on the underlying blob.
BlockBlobOutputStreamOptions convertedOptions = Transforms.toBlockBlobOutputStreamOptions(options);
return blockBlobClient.getBlobOutputStream(convertedOptions, context);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath) {
// overwrite = false: an existing destination file causes a FileAlreadyExistsException.
return readToFile(filePath, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
// overwrite = false: an existing destination file causes a FileAlreadyExistsException.
return readToFile(options, false);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(String filePath, boolean overwrite) {
    // Without overwrite, pass null open options so the download fails if the file already exists.
    if (!overwrite) {
        return readToFileWithResponse(filePath, null, null, null, null, false, null, null, Context.NONE)
            .getValue();
    }
    // CREATE + TRUNCATE_EXISTING replaces any existing file; READ and WRITE mirror the open
    // options this client uses for every overwrite-enabled download.
    Set<OpenOption> openOptions = new HashSet<>();
    openOptions.add(StandardOpenOption.CREATE);
    openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
    openOptions.add(StandardOpenOption.READ);
    openOptions.add(StandardOpenOption.WRITE);
    return readToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
        .getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown.</p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile
* <pre>
* boolean overwrite1 = false; &
* client.readToFile&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ReadToFileOptions}
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // NOTE: mutates the caller-supplied options bag by installing overwrite-friendly
        // open options (CREATE + TRUNCATE_EXISTING replaces an existing file).
        Set<OpenOption> overwriteOptions = new HashSet<>();
        overwriteOptions.add(StandardOpenOption.CREATE);
        overwriteOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOptions.add(StandardOpenOption.READ);
        overwriteOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(overwriteOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* FileRange fileRange = new FileRange&
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions&
* Set<OpenOption> openOptions = new HashSet<>&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
*
* client.readToFileWithResponse&
* downloadRetryOptions, null, false, openOptions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link FileRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link DataLakeRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(String filePath, FileRange range,
    ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
    DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
    Duration timeout, Context context) {
    // Translate every DataLake parameter into its blob-model counterpart, download through the
    // underlying blob client, and convert the resulting properties (and any thrown blob
    // exception) back into the DataLake model.
    return DataLakeImplUtils.returnOrConvertException(() -> {
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(filePath)
            .setRange(Transforms.toBlobRange(range))
            .setParallelTransferOptions(parallelTransferOptions)
            .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(downloadRetryOptions))
            .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
            .setRetrieveContentRangeMd5(rangeGetContentMd5)
            .setOpenOptions(openOptions);
        Response<BlobProperties> blobResponse =
            blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, context);
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}
/**
* Reads the entire file into a file specified by the path.
*
* <p>By default the file will be created and must not exist, if the file already exists a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions} </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&
* options.setRange&
* options.setDownloadRetryOptions&
* options.setOpenOptions&
* StandardOpenOption.WRITE, StandardOpenOption.READ&
* options.setParallelTransferOptions&
* options.setDataLakeRequestConditions&
* options.setRangeGetContentMd5&
*
* client.readToFileWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse
*
* @param options {@link ReadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the file properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Moves the file to another location within the file system.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
* <pre>
* DataLakeDirectoryAsyncClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @return A {@link DataLakeFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public DataLakeFileClient rename(String destinationFileSystem, String destinationPath) {
// No source/destination request conditions, no timeout, default context.
return renameWithResponse(destinationFileSystem, destinationPath, null, null, null, null).getValue();
}
/**
* Moves the file to another location within the file system.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
* <pre>
* DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions&
*
* DataLakeFileClient newRenamedClient = client.renameWithResponse&
* sourceRequestConditions, destinationRequestConditions, timeout, new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse
*
* @param destinationFileSystem The file system of the destination within the account.
* {@code null} for the current file system.
* @param destinationPath Relative path from the file system to rename the file to, excludes the file system name.
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path
* in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt"
* @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
* @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
*
* @return A {@link Response} whose {@link Response
* used to interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<DataLakeFileClient> renameWithResponse(String destinationFileSystem, String destinationPath,
DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
Duration timeout, Context context) {
// Rename through the async client, then rebuild a synchronous client targeting the renamed path
// (a fresh BlockBlobClient is constructed from the renamed async client's blob client).
Mono<Response<DataLakeFileClient>> response =
dataLakeFileAsyncClient.renameWithResponse(destinationFileSystem, destinationPath,
sourceRequestConditions, destinationRequestConditions, context)
.map(asyncResponse ->
new SimpleResponse<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
asyncResponse.getHeaders(),
new DataLakeFileClient(new DataLakeFileAsyncClient(asyncResponse.getValue()),
new SpecializedBlobClientBuilder()
.blobAsyncClient(asyncResponse.getValue().blockBlobAsyncClient)
.buildBlockBlobClient())));
Response<DataLakeFileClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
// NOTE(review): the value is wrapped in a DataLakeFileClient both inside the map above and
// again here — presumably the outer copy-construction is intentional; confirm it is not redundant.
return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* InputStream inputStream = client.openQueryInputStream&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param expression The query expression.
* @return An <code>InputStream</code> object that represents the stream to use for reading the query response.
*/
public InputStream openQueryInputStream(String expression) {
// Default query options; only the SQL-like expression is caller-specified.
return openQueryInputStreamWithResponse(new FileQueryOptions(expression)).getValue();
}
/**
* Opens an input stream to query the file.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
* <pre>
* String expression = "SELECT * from BlobStorage";
* FileQuerySerialization input = new FileQueryDelimitedSerialization&
* .setColumnSeparator&
* .setEscapeChar&
* .setRecordSeparator&
* .setHeadersPresent&
* .setFieldQuote&
* FileQuerySerialization output = new FileQueryJsonSerialization&
* .setRecordSeparator&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* .setLeaseId&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
*
* InputStream inputStream = client.openQueryInputStreamWithResponse&
* &
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @return A response containing status code and HTTP headers including an <code>InputStream</code> object
* that represents the stream to use for reading the query response.
*/
public Response<InputStream> openQueryInputStreamWithResponse(FileQueryOptions queryOptions) {
    // Block on the async query; the reactive pipeline completing empty (null) is unexpected and
    // surfaced as an IllegalStateException.
    FileQueryAsyncResponse asyncResponse = dataLakeFileAsyncClient.queryWithResponse(queryOptions).block();
    if (asyncResponse == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
    }
    InputStream queryStream = new FluxInputStream(asyncResponse.getValue());
    return new ResponseBase<>(asyncResponse.getRequest(), asyncResponse.getStatusCode(),
        asyncResponse.getHeaders(), queryStream, asyncResponse.getDeserializedHeaders());
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.query
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* client.query&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.query
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param expression The query expression.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public void query(OutputStream stream, String expression) {
// No timeout; query results are written directly to the supplied output stream.
queryWithResponse(new FileQueryOptions(expression, stream), null, Context.NONE);
}
/**
* Queries an entire file into an output stream.
*
* <p>For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
* <pre>
* ByteArrayOutputStream queryData = new ByteArrayOutputStream&
* String expression = "SELECT * from BlobStorage";
* FileQueryJsonSerialization input = new FileQueryJsonSerialization&
* .setRecordSeparator&
* FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization&
* .setEscapeChar&
* .setColumnSeparator&
* .setRecordSeparator&
* .setFieldQuote&
* .setHeadersPresent&
* DataLakeRequestConditions requestConditions = new DataLakeRequestConditions&
* Consumer<FileQueryError> errorConsumer = System.out::println;
* Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println&
* + progress.getBytesScanned&
* FileQueryOptions queryOptions = new FileQueryOptions&
* .setInputSerialization&
* .setOutputSerialization&
* .setRequestConditions&
* .setErrorConsumer&
* .setProgressConsumer&
* System.out.printf&
* client.queryWithResponse&
* .getStatusCode&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse
*
* @param queryOptions {@link FileQueryOptions The query options}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null.
*/
public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Duration timeout, Context context) {
    // Delegate to the blob query API, converting options on the way in and the response (and any
    // thrown blob exception) on the way out.
    return DataLakeImplUtils.returnOrConvertException(
        () -> Transforms.toFileQueryResponse(blockBlobClient.queryWithResponse(
            Transforms.toBlobQueryOptions(queryOptions), timeout, context)), LOGGER);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* client.scheduleDeletion&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion
*
* @param options Schedule deletion parameters.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void scheduleDeletion(FileScheduleDeletionOptions options) {
// Fire-and-forget variant: the response status and headers are discarded.
this.scheduleDeletionWithResponse(options, null, Context.NONE);
}
/**
* Schedules the file for deletion.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
* <pre>
* FileScheduleDeletionOptions options = new FileScheduleDeletionOptions&
* Context context = new Context&
*
* client.scheduleDeletionWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse
*
* @param options Schedule deletion parameters.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
Duration timeout, Context context) {
// Delegate to the async client and block, honoring the optional timeout.
Mono<Response<Void>> response = this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
}

class BuilderHelper {
    // HttpHeaderName constant so we use HttpHeaders.set(HttpHeaderName, String) instead of the
    // deprecated set(String, String) overload.
    // NOTE(review): requires an import of com.azure.core.http.HttpHeaderName — confirm the
    // file's import block includes it.
    private static final HttpHeaderName X_MS_UPN = HttpHeaderName.fromString("x-ms-upn");

    /**
     * Conditionally stages an {@code x-ms-upn} header on the request {@link Context} so that
     * {@link AddHeadersFromContextPolicy} appends it to the outgoing request.
     *
     * @param upnHeaderValue Supplies the desired header value; {@code null} means the header is
     * not sent and the context is returned unchanged.
     * @param context Caller-provided context, possibly {@code null}.
     * @return A context carrying the header, or the original context when no header is needed.
     */
    public static Context addUpnHeader(Supplier<Boolean> upnHeaderValue, Context context) {
        Boolean value = upnHeaderValue.get();
        if (value == null) {
            return context;
        }
        HttpHeaders headers = new HttpHeaders();
        headers.set(X_MS_UPN, Boolean.toString(value));
        return (context == null)
            ? new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers)
            : context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
    }
// Client name/version resolved once from the generated properties file; values feed the
// User-Agent policy built in buildPipeline.
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
static {
Map<String, String> properties = CoreUtils.getProperties("azure-storage-file-datalake.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
/**
* Constructs a {@link HttpPipeline} from values passed from a builder.
*
* @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present.
* @param tokenCredential {@link TokenCredential} if present.
* @param azureSasCredential {@link AzureSasCredential} if present.
* @param endpoint The endpoint for the client.
* @param retryOptions Storage retry options to set in the retry policy.
* @param coreRetryOptions Core retry options to set in the retry policy.
* @param logOptions Logging options to set in the logging policy.
* @param clientOptions Client options.
* @param httpClient HttpClient to use in the builder.
* @param perCallPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline per call.
* @param perRetryPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline per retry.
* @param configuration Configuration store contain environment settings.
* @param audience {@link DataLakeAudience} used to determine the audience of the path.
* @param logger {@link ClientLogger} used to log any exception.
* @return A new {@link HttpPipeline} from the passed values.
*/
public static HttpPipeline buildPipeline(
StorageSharedKeyCredential storageSharedKeyCredential,
TokenCredential tokenCredential, AzureSasCredential azureSasCredential, String endpoint,
RequestRetryOptions retryOptions, RetryOptions coreRetryOptions,
HttpLogOptions logOptions, ClientOptions clientOptions, HttpClient httpClient,
List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies,
Configuration configuration, DataLakeAudience audience, ClientLogger logger) {
CredentialValidator.validateSingleCredentialIsPresent(
storageSharedKeyCredential, tokenCredential, azureSasCredential, null, logger);
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(getUserAgentPolicy(configuration, logOptions, clientOptions));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, logger));
policies.add(new AddDatePolicy());
policies.add(new AddHeadersFromContextPolicy());
HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions);
if (headers != null) {
policies.add(new AddHeadersPolicy(headers));
}
policies.add(new MetadataValidationPolicy());
HttpPipelinePolicy credentialPolicy;
if (storageSharedKeyCredential != null) {
credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential);
} else if (tokenCredential != null) {
String scope = audience != null
? ((audience.toString().endsWith("/") ? audience + ".default" : audience + "/.default"))
: Constants.STORAGE_SCOPE;
credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, scope);
} else if (azureSasCredential != null) {
credentialPolicy = new AzureSasCredentialPolicy(azureSasCredential, false);
} else {
credentialPolicy = null;
}
if (credentialPolicy != null) {
policies.add(credentialPolicy);
}
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(getResponseValidationPolicy());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.tracer(createTracer(clientOptions))
.build();
}
/**
* Gets the default http log option for Storage Blob.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
HttpLogOptions defaultOptions = new HttpLogOptions();
DataLakeHeadersAndQueryParameters.getDataLakeHeaders().forEach(defaultOptions::addAllowedHeaderName);
DataLakeHeadersAndQueryParameters.getDataLakeQueryParameters().forEach(
defaultOptions::addAllowedQueryParamName);
return defaultOptions;
}
/**
* Gets the endpoint for the data lake service based on the parsed URL.
*
* @param parts The {@link BlobUrlParts} from the parse URL.
* @return The endpoint for the data lake service.
*/
public static String getEndpoint(BlobUrlParts parts) throws MalformedURLException {
if (ModelHelper.determineAuthorityIsIpStyle(parts.getHost())) {
return String.format("%s:
} else {
return String.format("%s:
}
}
/*
* Creates a {@link UserAgentPolicy} using the default blob module name and version.
*
* @param configuration Configuration store used to determine whether telemetry information should be included.
* @param logOptions Logging options to set in the logging policy.
* @param clientOptions Client options.
* @return The default {@link UserAgentPolicy} for the module.
*/
private static UserAgentPolicy getUserAgentPolicy(Configuration configuration, HttpLogOptions logOptions,
ClientOptions clientOptions) {
configuration = (configuration == null) ? Configuration.NONE : configuration;
String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions);
return new UserAgentPolicy(applicationId, CLIENT_NAME, CLIENT_VERSION, configuration);
}
/*
* Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from
* the service.
*
* @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module.
*/
private static HttpPipelinePolicy getResponseValidationPolicy() {
return new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build();
}
/**
* Gets a BlobUserAgentModificationPolicy with the correct clientName and clientVersion.
*
* @return {@link BlobUserAgentModificationPolicy}
*/
public static BlobUserAgentModificationPolicy getBlobUserAgentModificationPolicy() {
return new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION);
}
/**
* Validates that the client is properly configured to use https.
*
* @param objectToCheck The object to check for.
* @param objectName The name of the object.
* @param endpoint The endpoint for the client.
*/
public static void httpsValidation(Object objectToCheck, String objectName, String endpoint, ClientLogger logger) {
if (objectToCheck != null && !BlobUrlParts.parse(endpoint).getScheme().equals(Constants.HTTPS)) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Using a(n) " + objectName + " requires https"));
}
}
private static Tracer createTracer(ClientOptions clientOptions) {
TracingOptions tracingOptions = clientOptions == null ? null : clientOptions.getTracingOptions();
return TracerProvider.getDefaultProvider()
.createTracer(CLIENT_NAME, CLIENT_VERSION, STORAGE_TRACING_NAMESPACE_VALUE, tracingOptions);
}
} | class BuilderHelper {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
private static final HttpHeaderName X_MS_UPN = HttpHeaderName.fromString("x-ms-upn");
static {
Map<String, String> properties = CoreUtils.getProperties("azure-storage-file-datalake.properties");
CLIENT_NAME = properties.getOrDefault("name", "UnknownName");
CLIENT_VERSION = properties.getOrDefault("version", "UnknownVersion");
}
/**
* Constructs a {@link HttpPipeline} from values passed from a builder.
*
* @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present.
* @param tokenCredential {@link TokenCredential} if present.
* @param azureSasCredential {@link AzureSasCredential} if present.
* @param endpoint The endpoint for the client.
* @param retryOptions Storage retry options to set in the retry policy.
* @param coreRetryOptions Core retry options to set in the retry policy.
* @param logOptions Logging options to set in the logging policy.
* @param clientOptions Client options.
* @param httpClient HttpClient to use in the builder.
* @param perCallPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline per call.
* @param perRetryPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline per retry.
* @param configuration Configuration store contain environment settings.
* @param audience {@link DataLakeAudience} used to determine the audience of the path.
* @param logger {@link ClientLogger} used to log any exception.
* @return A new {@link HttpPipeline} from the passed values.
*/
public static HttpPipeline buildPipeline(
StorageSharedKeyCredential storageSharedKeyCredential,
TokenCredential tokenCredential, AzureSasCredential azureSasCredential, String endpoint,
RequestRetryOptions retryOptions, RetryOptions coreRetryOptions,
HttpLogOptions logOptions, ClientOptions clientOptions, HttpClient httpClient,
List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies,
Configuration configuration, DataLakeAudience audience, ClientLogger logger) {
CredentialValidator.validateSingleCredentialIsPresent(
storageSharedKeyCredential, tokenCredential, azureSasCredential, null, logger);
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(getUserAgentPolicy(configuration, logOptions, clientOptions));
policies.add(new RequestIdPolicy());
policies.addAll(perCallPolicies);
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, logger));
policies.add(new AddDatePolicy());
policies.add(new AddHeadersFromContextPolicy());
HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions);
if (headers != null) {
policies.add(new AddHeadersPolicy(headers));
}
policies.add(new MetadataValidationPolicy());
HttpPipelinePolicy credentialPolicy;
if (storageSharedKeyCredential != null) {
credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential);
} else if (tokenCredential != null) {
String scope = audience != null
? ((audience.toString().endsWith("/") ? audience + ".default" : audience + "/.default"))
: Constants.STORAGE_SCOPE;
credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, scope);
} else if (azureSasCredential != null) {
credentialPolicy = new AzureSasCredentialPolicy(azureSasCredential, false);
} else {
credentialPolicy = null;
}
if (credentialPolicy != null) {
policies.add(credentialPolicy);
}
policies.addAll(perRetryPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(getResponseValidationPolicy());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.clientOptions(clientOptions)
.tracer(createTracer(clientOptions))
.build();
}
/**
* Gets the default http log option for Storage Blob.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
HttpLogOptions defaultOptions = new HttpLogOptions();
DataLakeHeadersAndQueryParameters.getDataLakeHeaders().forEach(defaultOptions::addAllowedHeaderName);
DataLakeHeadersAndQueryParameters.getDataLakeQueryParameters().forEach(
defaultOptions::addAllowedQueryParamName);
return defaultOptions;
}
/**
* Gets the endpoint for the data lake service based on the parsed URL.
*
* @param parts The {@link BlobUrlParts} from the parse URL.
* @return The endpoint for the data lake service.
*/
public static String getEndpoint(BlobUrlParts parts) throws MalformedURLException {
if (ModelHelper.determineAuthorityIsIpStyle(parts.getHost())) {
return String.format("%s:
} else {
return String.format("%s:
}
}
/*
* Creates a {@link UserAgentPolicy} using the default blob module name and version.
*
* @param configuration Configuration store used to determine whether telemetry information should be included.
* @param logOptions Logging options to set in the logging policy.
* @param clientOptions Client options.
* @return The default {@link UserAgentPolicy} for the module.
*/
private static UserAgentPolicy getUserAgentPolicy(Configuration configuration, HttpLogOptions logOptions,
ClientOptions clientOptions) {
configuration = (configuration == null) ? Configuration.NONE : configuration;
String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions);
return new UserAgentPolicy(applicationId, CLIENT_NAME, CLIENT_VERSION, configuration);
}
/*
* Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from
* the service.
*
* @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module.
*/
private static HttpPipelinePolicy getResponseValidationPolicy() {
return new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build();
}
/**
* Gets a BlobUserAgentModificationPolicy with the correct clientName and clientVersion.
*
* @return {@link BlobUserAgentModificationPolicy}
*/
public static BlobUserAgentModificationPolicy getBlobUserAgentModificationPolicy() {
return new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION);
}
/**
* Validates that the client is properly configured to use https.
*
* @param objectToCheck The object to check for.
* @param objectName The name of the object.
* @param endpoint The endpoint for the client.
*/
public static void httpsValidation(Object objectToCheck, String objectName, String endpoint, ClientLogger logger) {
if (objectToCheck != null && !BlobUrlParts.parse(endpoint).getScheme().equals(Constants.HTTPS)) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Using a(n) " + objectName + " requires https"));
}
}
private static Tracer createTracer(ClientOptions clientOptions) {
TracingOptions tracingOptions = clientOptions == null ? null : clientOptions.getTracingOptions();
return TracerProvider.getDefaultProvider()
.createTracer(CLIENT_NAME, CLIENT_VERSION, STORAGE_TRACING_NAMESPACE_VALUE, tracingOptions);
}
} |
Shouldn't the file name be encoded when it is part of the URL? | public void getNonEncodedFileName(String fileName) {
primaryDirectoryClient.create();
ShareFileClient fileClient = primaryDirectoryClient.getFileClient(fileName);
assertEquals(primaryDirectoryClient.getDirectoryPath() + "/" + fileName, fileClient.getFilePath());
fileClient.create(1024);
assertTrue(fileClient.exists());
} | assertEquals(primaryDirectoryClient.getDirectoryPath() + "/" + fileName, fileClient.getFilePath()); | public void getNonEncodedFileName(String fileName) {
primaryDirectoryClient.create();
ShareFileClient fileClient = primaryDirectoryClient.getFileClient(fileName);
assertEquals(primaryDirectoryClient.getDirectoryPath() + "/" + fileName, fileClient.getFilePath());
fileClient.create(1024);
assertTrue(fileClient.exists());
} | class DirectoryApiTests extends FileShareTestBase {
private ShareDirectoryClient primaryDirectoryClient;
private ShareClient shareClient;
private String directoryPath;
private String shareName;
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
shareName = generateShareName();
directoryPath = generatePathName();
shareClient = shareBuilderHelper(shareName).buildClient();
shareClient.create();
primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath).buildDirectoryClient();
testMetadata = Collections.singletonMap("testmetadata", "value");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getDirectoryUrl() {
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
String expectURL = String.format("https:
directoryPath);
String directoryURL = primaryDirectoryClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
}
@Test
public void getShareSnapshotUrl() {
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
String expectURL = String.format("https:
directoryPath);
ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot();
expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot();
ShareDirectoryClient newDirClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot())
.buildClient().getDirectoryClient(directoryPath);
String directoryURL = newDirClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
String snapshotEndpoint = String.format("https:
shareName, directoryPath, shareSnapshotInfo.getSnapshot());
ShareDirectoryClient client = getDirectoryClient(StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint);
assertEquals(client.getDirectoryUrl(), snapshotEndpoint);
}
@Test
public void getSubDirectoryClient() {
ShareDirectoryClient subDirectoryClient = primaryDirectoryClient.getSubdirectoryClient("testSubDirectory");
assertInstanceOf(ShareDirectoryClient.class, subDirectoryClient);
}
@Test
public void getFileClient() {
ShareFileClient fileClient = primaryDirectoryClient.getFileClient("testFile");
assertInstanceOf(ShareFileClient.class, fileClient);
}
private static Stream<Arguments> getNonEncodedFileNameSupplier() {
return Stream.of(
Arguments.of("test%test"),
Arguments.of("%Россия 한국 中国!"),
Arguments.of("%E6%96%91%E9%BB%9E"),
Arguments.of("斑點")
);
}
@ParameterizedTest
@MethodSource("getNonEncodedFileNameSupplier")
@Test
public void exists() {
primaryDirectoryClient.create();
assertTrue(primaryDirectoryClient.exists());
}
@Test
public void doesNotExist() {
assertFalse(primaryDirectoryClient.exists());
}
@Test
public void existsError() {
primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath)
.sasToken("sig=dummyToken").buildDirectoryClient();
assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.exists());
ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.exists());
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 403, ShareErrorCode.AUTHENTICATION_FAILED);
}
@Test
public void createDirectory() {
assertEquals(201, primaryDirectoryClient.createWithResponse(null, null, null, null, null).getStatusCode());
}
@Test
public void createDirectoryError() {
String testShareName = generateShareName();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> directoryBuilderHelper(testShareName, directoryPath).buildDirectoryClient().create());
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
@Test
public void createDirectoryWithMetadata() {
assertEquals(201, primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null)
.getStatusCode());
}
@Test
public void createDirectoryWithFilePermission() {
Response<ShareDirectoryInfo> resp =
primaryDirectoryClient.createWithResponse(null, FILE_PERMISSION, null, null, null);
assertEquals(201, resp.getStatusCode());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createDirectoryWithFilePermissionKey() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
Response<ShareDirectoryInfo> resp =
primaryDirectoryClient.createWithResponse(smbProperties, null, null, null, null);
assertEquals(201, resp.getStatusCode());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createDirectoryWithNtfsAttributes() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> attributes =
EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey)
.setNtfsFileAttributes(attributes);
Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createWithResponse(smbProperties, null, null, null,
null);
assertEquals(201, resp.getStatusCode());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void createChangeTime() {
OffsetDateTime changeTime = testResourceNamer.now();
primaryDirectoryClient.createWithResponse(new FileSmbProperties().setFileChangeTime(changeTime), null, null,
null, null);
assertTrue(FileShareTestHelper.compareDatesWithPrecision(
primaryDirectoryClient.getProperties().getSmbProperties().getFileChangeTime(), changeTime));
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
assertThrows(IllegalArgumentException.class, () ->
primaryDirectoryClient.createWithResponse(properties, permission, null, null, null));
}
private static Stream<Arguments> permissionAndKeySupplier() {
return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION),
Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB))));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void createTrailingDot(boolean allowTrailingDot) {
ShareClient shareClient = getShareClient(shareName, allowTrailingDot, null);
ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
String dirName = generatePathName();
String dirNameWithDot = dirName + ".";
ShareDirectoryClient dirClient = shareClient.getDirectoryClient(dirNameWithDot);
dirClient.create();
List<String> foundDirectories = new ArrayList<>();
for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) {
foundDirectories.add(fileRef.getName());
}
assertEquals(1, foundDirectories.size());
if (allowTrailingDot) {
assertEquals(dirNameWithDot, foundDirectories.get(0));
} else {
assertEquals(dirName, foundDirectories.get(0));
}
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void createDirectoryOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
Response<ShareDirectoryInfo> result = dirClient.createWithResponse(null, null, null, null, null);
assertEquals(shareName, dirClient.getShareName());
assertEquals(dirName, dirClient.getDirectoryPath());
assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG));
}
@Test
public void createIfNotExistsDirectoryMin() {
assertNotNull(primaryDirectoryClient.createIfNotExists());
}
@Test
public void createIfNotExistsDirectory() {
assertEquals(201, primaryDirectoryClient
.createIfNotExistsWithResponse(new ShareDirectoryCreateOptions(), null, null).getStatusCode());
}
@Test
public void createIfNotExistsDirectoryError() {
String testShareName = generateShareName();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> directoryBuilderHelper(testShareName, directoryPath).buildDirectoryClient().createIfNotExists());
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions();
ShareDirectoryClient primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
Response<ShareDirectoryInfo> initialResponse =
primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
Response<ShareDirectoryInfo> secondResponse =
primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@Test
public void createIfNotExistsDirectoryWithMetadata() {
ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setMetadata(testMetadata);
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createIfNotExistsWithResponse(options,
null, null), 201);
}
@Test
public void createIfNotExistsDirectoryWithFilePermission() {
ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION);
Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createIfNotExistsDirectoryWithFilePermissionKey() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createIfNotExistsDirectoryWithNtfsAttributes() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> attributes = EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey)
.setNtfsFileAttributes(attributes);
ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createIfNotExistsDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions()
.setSmbProperties(properties)
.setFilePermission(permission);
assertThrows(IllegalArgumentException.class, () ->
primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null));
}
@Test
public void deleteDirectory() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteWithResponse(null, null), 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteTrailingDot() {
shareClient = getShareClient(shareName, true, null);
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
directoryClient.create();
FileShareTestHelper.assertResponseStatusCode(directoryClient.deleteWithResponse(null, null), 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteDirectoryOAuth() {
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
Response<Void> response = dirClient.deleteWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(response, 202);
assertNotNull(response.getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID));
}
@Test
public void deleteDirectoryError() {
ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.delete());
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsDirectory() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteIfExistsWithResponse(null, null),
202);
}
@Test
public void deleteIfExistsDirectoryMin() {
primaryDirectoryClient.create();
assertTrue(primaryDirectoryClient.deleteIfExists());
}
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
Response<Boolean> response = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertFalse(primaryDirectoryClient.exists());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
primaryDirectoryClient.create();
Response<Boolean> initialResponse = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
Response<Boolean> secondResponse = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
assertEquals(202, initialResponse.getStatusCode());
assertEquals(404, secondResponse.getStatusCode());
assertTrue(initialResponse.getValue());
assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
primaryDirectoryClient.create();
Response<ShareDirectoryProperties> resp = primaryDirectoryClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// With trailing-dot support enabled on the share client, a directory whose name
// ends in '.' can be created and queried; the properties returned by GET match
// those returned at creation (ETag, last-modified, and all SMB properties).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    ShareDirectoryInfo createResponse = directoryClient.createIfNotExists();
    Response<ShareDirectoryProperties> propertiesResponse = directoryClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(propertiesResponse, 200);
    assertEquals(createResponse.getETag(), propertiesResponse.getValue().getETag());
    assertEquals(createResponse.getLastModified(), propertiesResponse.getValue().getLastModified());
    FileSmbProperties createSmbProperties = createResponse.getSmbProperties();
    FileSmbProperties getPropertiesSmbProperties = propertiesResponse.getValue().getSmbProperties();
    assertEquals(createSmbProperties.getFilePermissionKey(), getPropertiesSmbProperties.getFilePermissionKey());
    assertEquals(createSmbProperties.getNtfsFileAttributes(), getPropertiesSmbProperties.getNtfsFileAttributes());
    assertEquals(createSmbProperties.getFileLastWriteTime(), getPropertiesSmbProperties.getFileLastWriteTime());
    assertEquals(createSmbProperties.getFileCreationTime(), getPropertiesSmbProperties.getFileCreationTime());
    assertEquals(createSmbProperties.getFileChangeTime(), getPropertiesSmbProperties.getFileChangeTime());
    assertEquals(createSmbProperties.getParentId(), getPropertiesSmbProperties.getParentId());
    assertEquals(createSmbProperties.getFileId(), getPropertiesSmbProperties.getFileId());
}
// Same create-then-get round trip, but through an OAuth-authenticated service
// client using the BACKUP share token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    ShareDirectoryInfo createInfo = dirClient.create();
    ShareDirectoryProperties properties = dirClient.getProperties();
    assertEquals(createInfo.getETag(), properties.getETag());
    assertEquals(createInfo.getLastModified(), properties.getLastModified());
    assertEquals(createInfo.getSmbProperties().getFilePermissionKey(),
        properties.getSmbProperties().getFilePermissionKey());
    assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(),
        properties.getSmbProperties().getNtfsFileAttributes());
    assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(),
        properties.getSmbProperties().getFileLastWriteTime());
    assertEquals(createInfo.getSmbProperties().getFileCreationTime(),
        properties.getSmbProperties().getFileCreationTime());
    assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
        properties.getSmbProperties().getFileChangeTime());
    assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId());
    assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId());
}
// getProperties on a directory that was never created fails with 404
// RESOURCE_NOT_FOUND.
@Test
public void getPropertiesError() {
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.getPropertiesWithResponse(null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// Setting properties with a raw SDDL file permission string succeeds (200) and
// the service echoes back a full set of SMB properties.
@Test
public void setPropertiesFilePermission() {
    primaryDirectoryClient.create();
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.setPropertiesWithResponse(null, FILE_PERMISSION,
        null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Setting properties via a pre-created permission key (rather than a raw SDDL
// string) succeeds (200) and returns a full set of SMB properties.
@Test
public void setPropertiesFilePermissionKey() {
    // Register the permission on the share first to obtain a key.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.create();
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.setPropertiesWithResponse(smbProperties, null, null,
        null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// A file-change-time set via setProperties is persisted and readable back
// (compared with service timestamp precision).
// Fix: removed dead setup — the original created a permission key and mutated
// the shared smbProperties field, but neither was used by the request under
// test (it sends a fresh FileSmbProperties with only the change time).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void setHttpHeadersChangeTime() {
    primaryDirectoryClient.create();
    OffsetDateTime changeTime = testResourceNamer.now();
    primaryDirectoryClient.setProperties(new FileSmbProperties().setFileChangeTime(changeTime), null);
    FileShareTestHelper.compareDatesWithPrecision(primaryDirectoryClient.getProperties().getSmbProperties()
        .getFileChangeTime(), changeTime);
}
// setPropertiesWithResponse works against a trailing-dot directory name when
// the share client has trailing-dot support enabled.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setHttpHeadersTrailingDot() {
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.createIfNotExists();
    Response<ShareDirectoryInfo> res = directoryClient.setPropertiesWithResponse(new FileSmbProperties(), null,
        null, null);
    FileShareTestHelper.assertResponseStatusCode(res, 200);
}
// setPropertiesWithResponse succeeds through an OAuth-authenticated client
// using the BACKUP share token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    Response<ShareDirectoryInfo> res = dirClient.setPropertiesWithResponse(new FileSmbProperties(), null, null,
        null);
    FileShareTestHelper.assertResponseStatusCode(res, 200);
}
// Supplying both a permission key and a raw permission string (combinations
// come from permissionAndKeySupplier) is rejected client-side with
// IllegalArgumentException before any request is sent.
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void setPropertiesError(String filePermissionKey, String permission) {
    FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.create();
    assertThrows(IllegalArgumentException.class, () ->
        primaryDirectoryClient.setPropertiesWithResponse(properties, permission, null, null));
}
// setMetadataWithResponse replaces the metadata set at creation: the old map is
// visible before the call, the new map after, and the call itself returns 200.
@Test
public void setMetadata() {
    primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> replacementMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties beforeUpdate = primaryDirectoryClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> updateResponse =
        primaryDirectoryClient.setMetadataWithResponse(replacementMetadata, null, null);
    ShareDirectoryProperties afterUpdate = primaryDirectoryClient.getProperties();
    assertEquals(testMetadata, beforeUpdate.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(updateResponse, 200);
    assertEquals(replacementMetadata, afterUpdate.getMetadata());
}
// Same metadata replacement round trip, against a trailing-dot directory name
// with trailing-dot support enabled on the share client.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setMetadataTrailingDot() {
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties getPropertiesBefore = directoryClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
        directoryClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties getPropertiesAfter = directoryClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
// Same metadata replacement round trip through an OAuth-authenticated client
// using the BACKUP share token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setMetadataOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties getPropertiesBefore = dirClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
        dirClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties getPropertiesAfter = dirClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
// A metadata entry with an empty key is rejected by the service with
// 400 EMPTY_METADATA_KEY.
@Test
public void setMetadataError() {
    primaryDirectoryClient.create();
    Map<String, String> metadataWithEmptyKey = Collections.singletonMap("", "value");
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.setMetadata(metadataWithEmptyKey));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listFilesAndDirectories(String[] expectedFiles, String[] expectedDirectories) {
primaryDirectoryClient.create();
for (String expectedFile : expectedFiles) {
primaryDirectoryClient.createFile(expectedFile, 2);
}
for (String expectedDirectory : expectedDirectories) {
primaryDirectoryClient.createSubdirectory(expectedDirectory);
}
List<String> foundFiles = new ArrayList<>();
List<String> foundDirectories = new ArrayList<>();
for (ShareFileItem fileRef : primaryDirectoryClient.listFilesAndDirectories()) {
if (fileRef.isDirectory()) {
foundDirectories.add(fileRef.getName());
} else {
foundFiles.add(fileRef.getName());
}
}
assertArrayEquals(expectedFiles, foundFiles.toArray());
assertArrayEquals(expectedDirectories, foundDirectories.toArray());
}
// Builds a two-level tree (2 subdirectories with 2 files each, plus one file at
// the top level), then lists the top level with a prefix and page size and
// checks exactly numOfResults names come back in order. Only the three
// top-level entries (dirPrefix0, dirPrefix1, dirPrefix2) match the prefix —
// nested files are not returned by a non-recursive listing.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
@ParameterizedTest
@MethodSource("listFilesAndDirectoriesArgsSupplier")
public void listFilesAndDirectoriesArgs(String extraPrefix, Integer maxResults, int numOfResults) {
    primaryDirectoryClient.create();
    List<String> nameList = new ArrayList<>();
    String dirPrefix = generatePathName();
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDirClient.create();
        for (int j = 0; j < 2; j++) {
            int num = i * 2 + j + 3;
            subDirClient.createFile(dirPrefix + num, 1024);
        }
    }
    primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
    for (int i = 0; i < 3; i++) {
        nameList.add(dirPrefix + i);
    }
    Iterator<ShareFileItem> fileRefIter = primaryDirectoryClient
        .listFilesAndDirectories(prefix + extraPrefix, maxResults, null, null).iterator();
    for (int i = 0; i < numOfResults; i++) {
        assertEquals(nameList.get(i), fileRefIter.next().getName());
    }
    // maxResults only controls page size, so all matches are still returned.
    assertFalse(fileRefIter.hasNext());
}
// Arguments for listFilesAndDirectoriesArgs: (extraPrefix, maxResults, expected
// result count). A non-matching prefix ("noOp") yields zero results.
private static Stream<Arguments> listFilesAndDirectoriesArgsSupplier() {
    return Stream.of(Arguments.of("", null, 3), Arguments.of("", 1, 3), Arguments.of("noOp", 3, 0));
}
// Listing with includeExtendedInfo plus each combination of the optional
// include flags (timestamps/etag/attributes/permission key) still returns the
// three expected top-level entries in order; the flags must not alter the
// result set.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
@ParameterizedTest
@CsvSource(value = {"false,false,false,false", "true,false,false,false", "false,true,false,false",
    "false,false,true,false", "false,false,false,true", "true,true,true,true"})
public void listFilesAndDirectoriesExtendedInfoArgs(boolean timestamps, boolean etag, boolean attributes,
    boolean permissionKey) {
    primaryDirectoryClient.create();
    List<String> nameList = new ArrayList<>();
    String dirPrefix = generatePathName();
    // Same fixture shape as listFilesAndDirectoriesArgs: 2 subdirectories with
    // 2 files each, plus one top-level file.
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDirClient.create();
        for (int j = 0; j < 2; j++) {
            int num = i * 2 + j + 3;
            subDirClient.createFile(dirPrefix + num, 1024);
        }
    }
    primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
    for (int i = 0; i < 3; i++) {
        nameList.add(dirPrefix + i);
    }
    ShareListFilesAndDirectoriesOptions options = new ShareListFilesAndDirectoriesOptions()
        .setPrefix(prefix)
        .setIncludeExtendedInfo(true)
        .setIncludeTimestamps(timestamps)
        .setIncludeETag(etag)
        .setIncludeAttributes(attributes)
        .setIncludePermissionKey(permissionKey);
    List<ShareFileItem> returnedFileList = primaryDirectoryClient.listFilesAndDirectories(options, null, null)
        .stream().collect(Collectors.toList());
    for (int i = 0; i < nameList.size(); i++) {
        assertEquals(nameList.get(i), returnedFileList.get(i).getName());
    }
}
// With every extended-info flag enabled, the listed items carry populated
// extended fields: id, attributes (DIRECTORY vs ARCHIVE), permission key, all
// four timestamps, last-modified, and a non-blank ETag — for both the
// directory entry and the file entry.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
public void listFilesAndDirectoriesExtendedInfoResults() {
    ShareDirectoryClient parentDir = primaryDirectoryClient;
    parentDir.create();
    ShareFileClient file = parentDir.createFile(generatePathName(), 1024);
    ShareDirectoryClient dir = parentDir.createSubdirectory(generatePathName());
    List<ShareFileItem> listResults = parentDir.listFilesAndDirectories(
        new ShareListFilesAndDirectoriesOptions()
            .setIncludeExtendedInfo(true)
            .setIncludeTimestamps(true)
            .setIncludePermissionKey(true)
            .setIncludeETag(true)
            .setIncludeAttributes(true),
        null, null)
        .stream().collect(Collectors.toList());
    // Listing order of the two entries is not assumed; pick them out by kind.
    ShareFileItem dirListItem;
    ShareFileItem fileListItem;
    if (listResults.get(0).isDirectory()) {
        dirListItem = listResults.get(0);
        fileListItem = listResults.get(1);
    } else {
        dirListItem = listResults.get(1);
        fileListItem = listResults.get(0);
    }
    assertEquals(dirListItem.getName(), new File(dir.getDirectoryPath()).getName());
    assertTrue(dirListItem.isDirectory());
    assertNotNull(dirListItem.getId());
    assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getId()));
    assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), dirListItem.getFileAttributes());
    assertNotNull(dirListItem.getPermissionKey());
    assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getPermissionKey()));
    assertNotNull(dirListItem.getProperties().getCreatedOn());
    assertNotNull(dirListItem.getProperties().getLastAccessedOn());
    assertNotNull(dirListItem.getProperties().getLastWrittenOn());
    assertNotNull(dirListItem.getProperties().getChangedOn());
    assertNotNull(dirListItem.getProperties().getLastModified());
    assertNotNull(dirListItem.getProperties().getETag());
    assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getProperties().getETag()));
    assertEquals(fileListItem.getName(), new File(file.getFilePath()).getName());
    assertFalse(fileListItem.isDirectory());
    assertNotNull(fileListItem.getId());
    assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getId()));
    assertEquals(EnumSet.of(NtfsFileAttributes.ARCHIVE), fileListItem.getFileAttributes());
    assertNotNull(fileListItem.getPermissionKey());
    assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getPermissionKey()));
    assertNotNull(fileListItem.getProperties().getCreatedOn());
    assertNotNull(fileListItem.getProperties().getLastAccessedOn());
    assertNotNull(fileListItem.getProperties().getLastWrittenOn());
    assertNotNull(fileListItem.getProperties().getChangedOn());
    assertNotNull(fileListItem.getProperties().getLastModified());
    assertNotNull(fileListItem.getProperties().getETag());
    assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getProperties().getETag()));
}
// Names containing U+FFFE (which requires service-side encoding) round-trip
// correctly through create and list; directories list before files.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncoded() {
    String specialCharDirectoryName = "directory\uFFFE";
    String specialCharFileName = "file\uFFFE";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
    primaryDirectoryClient.createFile(specialCharFileName, 1024);
    List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
        .collect(Collectors.toList());
    assertEquals(2, shareFileItems.size());
    assertTrue(shareFileItems.get(0).isDirectory());
    assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
    assertFalse(shareFileItems.get(1).isDirectory());
    assertEquals(specialCharFileName, shareFileItems.get(1).getName());
}
// U+FFFE names survive paging: with a page size of 1 the continuation token
// (which embeds the last name) still yields both files in order.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncodedContinuationToken() {
    String specialCharFileName0 = "file0\uFFFE";
    String specialCharFileName1 = "file1\uFFFE";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(specialCharFileName0, 1024);
    primaryDirectoryClient.createFile(specialCharFileName1, 1024);
    List<ShareFileItem> shareFileItems = new ArrayList<>();
    primaryDirectoryClient.listFilesAndDirectories().iterableByPage(1)
        .forEach(page -> shareFileItems.addAll(page.getValue()));
    assertEquals(specialCharFileName0, shareFileItems.get(0).getName());
    assertEquals(specialCharFileName1, shareFileItems.get(1).getName());
}
// A directory name containing U+FFFE round-trips through create and list.
// NOTE(review): despite the method name, no prefix is actually passed to the
// listing call — confirm whether a prefix option was intended here.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncodedPrefix() {
    String specialCharDirectoryName = "directory\uFFFE";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
    List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
        .collect(Collectors.toList());
    assertEquals(1, shareFileItems.size());
    assertTrue(shareFileItems.get(0).isDirectory());
    assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
}
// Through an OAuth-authenticated client (BACKUP intent): creates 11 files and
// 5 subdirectories, then verifies the listing classifies every entry correctly.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void testListFilesAndDirectoriesOAuth() {
    ShareDirectoryClient dirClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP))
        .getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    List<String> fileNames = new ArrayList<>();
    List<String> dirNames = new ArrayList<>();
    for (int i = 0; i < 11; i++) {
        fileNames.add(generatePathName());
    }
    for (int i = 0; i < 5; i++) {
        dirNames.add(generatePathName());
    }
    for (String file : fileNames) {
        dirClient.createFile(file, Constants.KB);
    }
    for (String directory : dirNames) {
        dirClient.createSubdirectory(directory);
    }
    List<String> foundFiles = new ArrayList<>();
    List<String> foundDirectories = new ArrayList<>();
    for (ShareFileItem fileRef : dirClient.listFilesAndDirectories()) {
        if (fileRef.isDirectory()) {
            foundDirectories.add(fileRef.getName());
        } else {
            foundFiles.add(fileRef.getName());
        }
    }
    // Containment (not equality) check: every found entry was one we created.
    assertTrue(fileNames.containsAll(foundFiles));
    assertTrue(dirNames.containsAll(foundDirectories));
}
// With iterableByPage(1), every returned page of a prefix-filtered listing
// contains exactly one item.
@Test
public void listMaxResultsByPage() {
    primaryDirectoryClient.create();
    String dirPrefix = generatePathName();
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDirClient.create();
        for (int j = 0; j < 2; j++) {
            int num = i * 2 + j + 3;
            subDirClient.createFile(dirPrefix + num, 1024);
        }
    }
    for (PagedResponse<ShareFileItem> page
        : primaryDirectoryClient.listFilesAndDirectories(prefix, null, null, null).iterableByPage(1)) {
        assertEquals(1, page.getValue().size());
    }
}
// A freshly created directory has no open handles, regardless of maxResults or
// the recursive flag.
@ParameterizedTest
@MethodSource("listHandlesSupplier")
public void listHandles(Integer maxResults, boolean recursive) {
    primaryDirectoryClient.create();
    long handleCount = primaryDirectoryClient.listHandles(maxResults, recursive, null, null).stream().count();
    assertEquals(0L, handleCount);
}
// Argument pairs for listHandles: (maxResults, recursive).
private static Stream<Arguments> listHandlesSupplier() {
    return Stream.of(Arguments.of(2, true), Arguments.of(null, false));
}
// listHandles works against a trailing-dot directory name when trailing-dot
// support is enabled; a fresh directory reports zero handles.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void listHandlesTrailingDot() {
    shareClient = getShareClient(shareName, true, null);
    String directoryName = generatePathName() + ".";
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(directoryName);
    directoryClient.create();
    List<HandleItem> handles = directoryClient.listHandles(null, false, null, null).stream()
        .collect(Collectors.toList());
    assertEquals(0, handles.size());
}
// listHandles works through an OAuth-authenticated client (BACKUP intent);
// a fresh directory reports zero handles.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void listHandlesOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    List<HandleItem> handles = dirClient.listHandles(2, true, null, null).stream().collect(Collectors.toList());
    assertEquals(0, handles.size());
}
// listHandles on a directory that was never created fails with 404
// RESOURCE_NOT_FOUND (the lazy iterator forces the request via hasNext()).
@Test
public void listHandlesError() {
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.listHandles(null, true, null, null).iterator().hasNext());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// Force-closing a handle id that is not open is a no-op: zero closed, zero failed.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
public void forceCloseHandleMin() {
    primaryDirectoryClient.create();
    CloseHandlesInfo closeResult = primaryDirectoryClient.forceCloseHandle("1");
    assertEquals(0, closeResult.getClosedHandles());
    assertEquals(0, closeResult.getFailedHandles());
}
// A syntactically invalid handle id is rejected with ShareStorageException.
@Test
public void forceCloseHandleInvalidHandleId() {
    primaryDirectoryClient.create();
    assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.forceCloseHandle("invalidHandleId"));
}
// forceCloseHandle works through an OAuth-authenticated client (BACKUP intent);
// closing a non-open handle id is a no-op.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void forceCloseHandleOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    CloseHandlesInfo handlesClosedInfo = dirClient.forceCloseHandle("1");
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}
// forceCloseAllHandles on a fresh directory closes nothing and fails nothing.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
public void forceCloseAllHandlesMin() {
    primaryDirectoryClient.create();
    CloseHandlesInfo closeResult = primaryDirectoryClient.forceCloseAllHandles(false, null, null);
    assertEquals(0, closeResult.getClosedHandles());
    assertEquals(0, closeResult.getFailedHandles());
}
// forceCloseAllHandles works against a trailing-dot directory name when
// trailing-dot support is enabled; a fresh directory closes/fails nothing.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void forceCloseAllHandlesTrailingDot() {
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.create();
    CloseHandlesInfo handlesClosedInfo = directoryClient.forceCloseAllHandles(false, null, null);
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}
// The simple rename overload succeeds on an existing directory.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameMin() {
    primaryDirectoryClient.create();
    String destinationPath = generatePathName();
    assertDoesNotThrow(() -> primaryDirectoryClient.rename(destinationPath));
}
// After renameWithResponse, the returned client reaches the renamed directory
// and the original path no longer resolves (getProperties throws).
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameWithResponse() {
    primaryDirectoryClient.create();
    Response<ShareDirectoryClient> resp = primaryDirectoryClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName()), null, null);
    ShareDirectoryClient renamedClient = resp.getValue();
    assertDoesNotThrow(renamedClient::getProperties);
    assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.getProperties());
}
// Renaming into a different (existing) parent directory moves the directory:
// the destination exists, the source does not, and the result client's path is
// the destination path.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDifferentDirectory() {
    primaryDirectoryClient.create();
    ShareDirectoryClient destinationClient = shareClient.getDirectoryClient(generatePathName());
    destinationClient.create();
    // getFileClient is used only to compose a child path under the destination.
    String destinationPath = destinationClient.getFileClient(generatePathName()).getFilePath();
    ShareDirectoryClient resultClient = primaryDirectoryClient.rename(destinationPath);
    assertTrue(resultClient.exists());
    assertFalse(primaryDirectoryClient.exists());
    assertEquals(destinationPath, resultClient.getDirectoryPath());
}
// Renaming onto an existing file succeeds only when replaceIfExists is set;
// otherwise the service rejects it with ShareStorageException.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void renameReplaceIfExists(boolean replaceIfExists) {
    primaryDirectoryClient.create();
    ShareFileClient destination = shareClient.getFileClient(generatePathName());
    destination.create(512L);
    ShareFileRenameOptions renameOptions = new ShareFileRenameOptions(destination.getFilePath())
        .setReplaceIfExists(replaceIfExists);
    if (replaceIfExists) {
        assertDoesNotThrow(() -> primaryDirectoryClient.renameWithResponse(renameOptions, null, null));
    } else {
        assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.renameWithResponse(renameOptions, null, null));
    }
}
// Replacing a READ_ONLY destination file via rename succeeds only when
// ignoreReadOnly is set; otherwise the service rejects it.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void renameIgnoreReadOnly(boolean ignoreReadOnly) {
    primaryDirectoryClient.create();
    FileSmbProperties readOnlyProps = new FileSmbProperties().setNtfsFileAttributes(
        EnumSet.of(NtfsFileAttributes.READ_ONLY));
    ShareFileClient destinationFile = shareClient.getFileClient(generatePathName());
    destinationFile.createWithResponse(512L, null, readOnlyProps, null, null, null, null, null);
    ShareFileRenameOptions options = new ShareFileRenameOptions(destinationFile.getFilePath())
        .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true);
    if (ignoreReadOnly) {
        assertDoesNotThrow(() -> primaryDirectoryClient.renameWithResponse(options, null, null));
    } else {
        assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.renameWithResponse(options, null, null));
    }
}
// A raw SDDL permission supplied in the rename options is applied to the
// destination: the renamed directory reports a permission key.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFilePermission() {
    primaryDirectoryClient.create();
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
        .setFilePermission(filePermission);
    ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
    assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey());
}
// Supplying both a raw permission string and a permission key in the rename
// options is rejected with ShareStorageException.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFilePermissionAndKeySet() {
    primaryDirectoryClient.create();
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
        .setFilePermission(filePermission)
        .setSmbProperties(new FileSmbProperties()
            .setFilePermissionKey("filePermissionkey"));
    assertThrows(ShareStorageException.class, () ->
        primaryDirectoryClient.renameWithResponse(options, null, null).getValue());
}
// SMB properties supplied in the rename options (permission key, attributes,
// creation/last-write/change times) are applied to the destination directory.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFileSmbProperties() {
    primaryDirectoryClient.create();
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    String permissionKey = shareClient.createPermission(filePermission);
    FileSmbProperties smbProperties = new FileSmbProperties()
        .setFilePermissionKey(permissionKey)
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.DIRECTORY))
        .setFileCreationTime(testResourceNamer.now().minusDays(5))
        .setFileLastWriteTime(testResourceNamer.now().minusYears(2))
        .setFileChangeTime(testResourceNamer.now());
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName()).setSmbProperties(smbProperties);
    ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
    FileSmbProperties destSmbProperties = destClient.getProperties().getSmbProperties();
    assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), destSmbProperties.getNtfsFileAttributes());
    assertNotNull(destSmbProperties.getFileCreationTime());
    assertNotNull(destSmbProperties.getFileLastWriteTime());
    FileShareTestHelper.compareDatesWithPrecision(destSmbProperties.getFileChangeTime(), testResourceNamer.now());
}
// Metadata supplied in the rename options is applied to the destination
// directory and readable back after the rename.
// Fix: the original fetched properties into a local and then issued a second,
// redundant getProperties() network call for the final assertion; the fetched
// value is reused instead.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameMetadata() {
    primaryDirectoryClient.create();
    String key = "update";
    String value = "value";
    Map<String, String> updatedMetadata = Collections.singletonMap(key, value);
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
        .setMetadata(updatedMetadata);
    ShareDirectoryClient renamedClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
    ShareDirectoryProperties properties = renamedClient.getProperties();
    assertNotNull(properties.getMetadata().get(key));
    assertEquals(value, properties.getMetadata().get(key));
}
// Rename works through an OAuth-authenticated client (BACKUP intent): the
// renamed client resolves with the new path; the old client no longer resolves.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String dirRename = generatePathName();
    ShareFileRenameOptions options = new ShareFileRenameOptions(dirRename);
    ShareDirectoryClient renamedClient = dirClient.renameWithResponse(options, null, null).getValue();
    assertDoesNotThrow(renamedClient::getProperties);
    assertEquals(dirRename, renamedClient.getDirectoryPath());
    assertThrows(ShareStorageException.class, dirClient::getProperties);
}
// Renaming a directory that was never created fails with ShareStorageException.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameError() {
    primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    String destinationPath = generatePathName();
    assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.rename(destinationPath));
}
// Rename over a leased destination file succeeds (200) when the matching lease
// id is supplied via destination request conditions.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDestAC() {
    primaryDirectoryClient.create();
    String pathName = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(pathName);
    destFile.create(512);
    String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID);
    // Conditions apply to the destination of the rename, not the source.
    ShareRequestConditions destConditions = new ShareRequestConditions().setLeaseId(leaseID);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.renameWithResponse(
        new ShareFileRenameOptions(pathName).setDestinationRequestConditions(destConditions)
            .setReplaceIfExists(true), null, null), 200);
}
// Rename over a leased destination with a WRONG (garbage) lease id must be rejected.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDestACFail() {
    primaryDirectoryClient.create();
    String pathName = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(pathName);
    destFile.create(512);
    setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions().setLeaseId(GARBAGE_LEASE_ID);
    assertThrows(RuntimeException.class,
        () -> primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(pathName)
            .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null));
}
// Rename works through a client authorized only by a share-scoped SAS token carrying
// read/write/create/delete permissions.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12")
public void testRenameSASToken() {
    ShareFileSasPermission permissions = new ShareFileSasPermission()
        .setReadPermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setDeletePermission(true);
    OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
    ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
    String sas = shareClient.generateSas(sasValues);
    ShareDirectoryClient client = getDirectoryClient(sas, primaryDirectoryClient.getDirectoryUrl());
    primaryDirectoryClient.create();
    String directoryName = generatePathName();
    ShareDirectoryClient destClient = client.rename(directoryName);
    assertNotNull(destClient);
    destClient.getProperties();
    assertEquals(directoryName, destClient.getDirectoryPath());
}
// With allowTrailingDot enabled on both source and destination, renaming a dot-suffixed
// directory onto itself must not throw.
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void renameTrailingDot() {
    shareClient = getShareClient(shareName, true, true);
    String directoryName = generatePathName() + ".";
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(directoryName);
    directoryClient.create();
    assertDoesNotThrow(() -> directoryClient.rename(directoryName));
}
// Creating a subdirectory returns 201 Created.
@Test
public void createSubDirectory() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryWithResponse(
        "testCreateSubDirectory", null, null, null, null, null), 201);
}
// A subdirectory name containing a path separator is rejected: the intermediate parent
// ("test") does not exist, so the service reports PARENT_NOT_FOUND.
@Test
public void createSubDirectoryInvalidName() {
    primaryDirectoryClient.create();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectory("test/subdirectory"));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.PARENT_NOT_FOUND);
}
// Creating a subdirectory with metadata attached returns 201 Created.
@Test
public void createSubDirectoryMetadata() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryWithResponse(
        "testCreateSubDirectory", null, null, testMetadata, null, null), 201);
}
// An empty metadata key is rejected with 400 EMPTY_METADATA_KEY.
@Test
public void createSubDirectoryMetadataError() {
    primaryDirectoryClient.create();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectoryWithResponse("testsubdirectory", null, null,
            Collections.singletonMap("", "value"), null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
// Creating a subdirectory with an inline SDDL file permission returns 201 Created.
@Test
public void createSubDirectoryFilePermission() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(
        primaryDirectoryClient.createSubdirectoryWithResponse("testCreateSubDirectory",
            null, FILE_PERMISSION, null, null, null), 201);
}
// Creating a subdirectory referencing a pre-created permission key (via SMB properties)
// returns 201 Created.
@Test
public void createSubDirectoryFilePermissionKey() {
    primaryDirectoryClient.create();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    FileShareTestHelper.assertResponseStatusCode(
        primaryDirectoryClient.createSubdirectoryWithResponse("testCreateSubDirectory", smbProperties, null, null,
            null, null), 201);
}
// createSubdirectoryIfNotExists on a fresh name returns 201 Created.
@Test
public void createIfNotExistsSubDirectory() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions(), null, null), 201);
}
// First createIfNotExists returns 201; the second call for the same name returns 409
// (conflict) without throwing.
@Test
public void createIfNotExistsSubDirectoryAlreadyExists() {
    String subdirectoryName = generatePathName();
    primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    primaryDirectoryClient.create();
    int initialResponseCode = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        subdirectoryName,
        new ShareDirectoryCreateOptions(),
        null, null)
        .getStatusCode();
    int secondResponseCode = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        subdirectoryName,
        new ShareDirectoryCreateOptions(),
        null, null)
        .getStatusCode();
    assertEquals(201, initialResponseCode);
    assertEquals(409, secondResponseCode);
}
// A slash in the subdirectory name still fails with PARENT_NOT_FOUND even on the
// "if not exists" variant.
@Test
public void createIfNotExistsSubDirectoryInvalidName() {
    primaryDirectoryClient.create();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectoryIfNotExists("test/subdirectory"));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.PARENT_NOT_FOUND);
}
// createSubdirectoryIfNotExists with metadata returns 201 Created.
@Test
public void createIfNotExistsSubDirectoryMetadata() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions().setMetadata(testMetadata), null, null), 201);
}
// Empty metadata key is rejected with 400 EMPTY_METADATA_KEY on the "if not exists" path too.
@Test
public void createIfNotExistsSubDirectoryMetadataError() {
    primaryDirectoryClient.create();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            "testsubdirectory",
            new ShareDirectoryCreateOptions()
                .setMetadata(Collections.singletonMap("", "value")),
            null,
            null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
// createSubdirectoryIfNotExists with an inline SDDL permission returns 201 Created.
@Test
public void createIfNotExistsSubDirectoryFilePermission() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION), null, null),
        201);
}
// createSubdirectoryIfNotExists referencing a pre-created permission key returns 201 Created.
@Test
public void testCreateIfNotExistsSubDirectoryFilePermissionKey() {
    primaryDirectoryClient.create();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions().setSmbProperties(smbProperties), null, null),
        201);
}
// Deleting an existing subdirectory returns 202 Accepted.
@Test
public void testDeleteSubDirectory() {
    String subDirectoryName = "testSubCreateDirectory";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(subDirectoryName);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteSubdirectoryWithResponse(
        subDirectoryName, null, null), 202);
}
// Deleting a subdirectory that does not exist fails with 404 RESOURCE_NOT_FOUND.
@Test
public void deleteSubDirectoryError() {
    primaryDirectoryClient.create();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.deleteSubdirectory("testsubdirectory"));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// deleteSubdirectoryIfExists on an existing subdirectory returns 202 Accepted.
@Test
public void deleteIfExistsSubDirectory() {
    String subDirectoryName = "testSubCreateDirectory";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(subDirectoryName);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient
        .deleteSubdirectoryIfExistsWithResponse(subDirectoryName, null, null), 202);
}
// Minimal overload: deleteSubdirectoryIfExists returns true when the subdirectory existed.
@Test
public void deleteIfExistsSubDirectoryMin() {
    String subDirectoryName = "testSubCreateDirectory";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(subDirectoryName);
    assertTrue(primaryDirectoryClient.deleteSubdirectoryIfExists(subDirectoryName));
}
// deleteSubdirectoryIfExists on a missing subdirectory returns 404 and false, not an exception.
@Test
public void deleteIfExistsSubDirectoryThatDoesNotExist() {
    primaryDirectoryClient.create();
    Response<Boolean> response = primaryDirectoryClient.deleteSubdirectoryIfExistsWithResponse("testsubdirectory",
        null, null);
    assertEquals(404, response.getStatusCode());
    assertFalse(response.getValue());
}
// Creating a file under the directory returns 201 Created.
@Test
public void createFile() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(
        primaryDirectoryClient.createFileWithResponse("testCreateFile", 1024, null, null, null, null, null, null),
        201);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, null, null, null, null, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, statusCode, errMsg);
}
// The full createFileWithResponse overload (headers + SMB properties + permission + metadata)
// returns 201 Created.
@Test
public void createFileMaxOverload() {
    primaryDirectoryClient.create();
    ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    FileShareTestHelper.assertResponseStatusCode(
        primaryDirectoryClient.createFileWithResponse("testCreateFile", 1024, httpHeaders, smbProperties,
            FILE_PERMISSION, testMetadata, null, null), 201);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null, metadata,
null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, errMsg);
}
// Deleting an existing file returns 202 Accepted.
@Test
public void deleteFile() {
    String fileName = "testCreateFile";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(fileName, 1024);
    FileShareTestHelper.assertResponseStatusCode(
        primaryDirectoryClient.deleteFileWithResponse(fileName, null, null), 202);
}
// Deleting a file that does not exist fails with 404 RESOURCE_NOT_FOUND.
@Test
public void deleteFileError() {
    primaryDirectoryClient.create();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.deleteFileWithResponse("testfile", null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// Minimal overload: deleteFileIfExists returns true when the file existed.
@Test
public void deleteIfExistsFileMin() {
    String fileName = "testCreateFile";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(fileName, 1024);
    assertTrue(primaryDirectoryClient.deleteFileIfExists(fileName));
}
// deleteFileIfExists on an existing file returns 202 Accepted.
@Test
public void deleteIfExistsFile() {
    String fileName = "testCreateFile";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(fileName, 1024);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteFileIfExistsWithResponse(fileName,
        null, null), 202);
}
// deleteFileIfExists on a missing file returns 404 and false, not an exception.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    primaryDirectoryClient.create();
    Response<Boolean> response = primaryDirectoryClient.deleteFileIfExistsWithResponse("testfile", null, null);
    assertEquals(404, response.getStatusCode());
    assertFalse(response.getValue());
}
// A client built with a snapshot id must echo that id back from getShareSnapshotId().
@Test
public void getSnapshotId() {
    OffsetDateTime snapshotTimestamp = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC);
    String snapshot = snapshotTimestamp.toString();
    ShareDirectoryClient shareSnapshotClient = directoryBuilderHelper(shareName, directoryPath).snapshot(snapshot)
        .buildDirectoryClient();
    assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
// The directory client must surface the share name it was built against.
@Test
public void getShareName() {
    String reportedShareName = primaryDirectoryClient.getShareName();
    assertEquals(shareName, reportedShareName);
}
// The directory client must surface the path it was built against.
@Test
public void getDirectoryPath() {
    String reportedPath = primaryDirectoryClient.getDirectoryPath();
    assertEquals(directoryPath, reportedPath);
}
// A per-call pipeline policy should override the service version header on the request.
// BUG FIX: the original wrapped `….getValue("x-ms-version").equals("2017-11-09")` in
// assertDoesNotThrow, which discards the boolean result and therefore asserts nothing.
// Replaced with a real equality assertion on the response header.
@Test
public void testPerCallPolicy() {
    primaryDirectoryClient.create();
    ShareDirectoryClient directoryClient = directoryBuilderHelper(primaryDirectoryClient.getShareName(),
        primaryDirectoryClient.getDirectoryPath())
        .addPolicy(getPerCallVersionPolicy()).buildDirectoryClient();
    Response<ShareDirectoryProperties> response = directoryClient.getPropertiesWithResponse(null, null);
    assertEquals("2017-11-09", response.getHeaders().getValue("x-ms-version"));
}
// Both "" and "/" must address the share's root directory, and children are reachable from it.
@ParameterizedTest
@ValueSource(strings = {"", "/"})
public void rootDirectorySupport(String rootDirPath) {
    String dir1Name = "dir1";
    String dir2Name = "dir2";
    shareClient.createDirectory(dir1Name).createSubdirectory(dir2Name);
    ShareDirectoryClient rootDirectory = shareClient.getDirectoryClient(rootDirPath);
    assertTrue(rootDirectory.exists());
    assertTrue(rootDirectory.getSubdirectoryClient(dir1Name).exists());
}
// Verifies that nanosecond-scale client timeouts cause share creation to fail fast. The whole
// attempt is retried a few times because timing-based tests are flaky on loaded machines.
// BUG FIX: the original loop fell through after exhausting the retries, so the test PASSED
// silently when every attempt failed; it now raises an AssertionError carrying the last failure.
@Test
public void createShareWithSmallTimeoutsFailForServiceClient() {
    int maxRetries = 5;
    long retryDelayMillis = 1000;
    Exception lastFailure = null;
    for (int i = 0; i < maxRetries; i++) {
        try {
            HttpClientOptions clientOptions = new HttpClientOptions()
                .setApplicationId("client-options-id")
                .setResponseTimeout(Duration.ofNanos(1))
                .setReadTimeout(Duration.ofNanos(1))
                .setWriteTimeout(Duration.ofNanos(1))
                .setConnectTimeout(Duration.ofNanos(1));
            // NOTE(review): this points a ShareServiceClient at the BLOB endpoint -- confirm
            // getBlobEndpoint() (rather than the file endpoint) is intentional here.
            ShareServiceClientBuilder clientBuilder = new ShareServiceClientBuilder()
                .endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
                .credential(ENVIRONMENT.getPrimaryAccount().getCredential())
                .retryOptions(new RequestRetryOptions(null, 1, (Integer) null, null, null, null))
                .clientOptions(clientOptions);
            ShareServiceClient serviceClient = clientBuilder.buildClient();
            assertThrows(RuntimeException.class, () -> serviceClient.createShareWithResponse(generateShareName(),
                null, Duration.ofSeconds(10), null));
            return; // attempt succeeded
        } catch (Exception e) {
            lastFailure = e;
            try {
                Thread.sleep(retryDelayMillis);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        }
    }
    throw new AssertionError("Timeout test did not pass within " + maxRetries + " attempts", lastFailure);
}
@Test
public void defaultAudience() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(null) /* should default to "https:
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
// An account-scoped audience for the correct account must authenticate successfully.
@Test
public void storageAccountAudience() {
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
    dirClient.create();
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP)
            .audience(ShareAudience.createShareServiceAccountAudience(primaryDirectoryClient.getAccountName())));
    ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    assertTrue(aadDirClient.exists());
}
// An audience scoped to the wrong account must be rejected with AUTHENTICATION_FAILED.
@Test
public void audienceError() {
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
    dirClient.create();
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP)
            .audience(ShareAudience.createShareServiceAccountAudience("badAudience")));
    ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    ShareStorageException e = assertThrows(ShareStorageException.class, aadDirClient::exists);
    assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
} | class DirectoryApiTests extends FileShareTestBase {
// Per-test state, (re)initialized by setup().
private ShareDirectoryClient primaryDirectoryClient;
private ShareClient shareClient;
private String directoryPath;
private String shareName;
// NOTE(review): static but reassigned from the per-instance setup() -- confirm this is intentional.
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
// SDDL descriptor used wherever a test needs an explicit file permission.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
// Creates a fresh share and an (uncreated) directory client before every test.
@BeforeEach
public void setup() {
    shareName = generateShareName();
    directoryPath = generatePathName();
    shareClient = shareBuilderHelper(shareName).buildClient();
    shareClient.create();
    primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath).buildDirectoryClient();
    testMetadata = Collections.singletonMap("testmetadata", "value");
    smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getDirectoryUrl() {
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
String expectURL = String.format("https:
directoryPath);
String directoryURL = primaryDirectoryClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
}
@Test
public void getShareSnapshotUrl() {
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
String expectURL = String.format("https:
directoryPath);
ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot();
expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot();
ShareDirectoryClient newDirClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot())
.buildClient().getDirectoryClient(directoryPath);
String directoryURL = newDirClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
String snapshotEndpoint = String.format("https:
shareName, directoryPath, shareSnapshotInfo.getSnapshot());
ShareDirectoryClient client = getDirectoryClient(StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint);
assertEquals(client.getDirectoryUrl(), snapshotEndpoint);
}
// getSubdirectoryClient must hand back a ShareDirectoryClient for the child path.
@Test
public void getSubDirectoryClient() {
    ShareDirectoryClient childClient = primaryDirectoryClient.getSubdirectoryClient("testSubDirectory");
    assertInstanceOf(ShareDirectoryClient.class, childClient);
}
// getFileClient must hand back a ShareFileClient for the child file path.
@Test
public void getFileClient() {
    ShareFileClient childFileClient = primaryDirectoryClient.getFileClient("testFile");
    assertInstanceOf(ShareFileClient.class, childFileClient);
}
// Argument supplier of file names containing percent signs and non-ASCII characters.
// NOTE(review): the @ParameterizedTest that consumed this supplier appears to be missing from
// this copy of the file (only its dangling annotations remain below) -- confirm.
private static Stream<Arguments> getNonEncodedFileNameSupplier() {
    return Stream.of(
        Arguments.of("test%test"),
        Arguments.of("%Россия 한국 中国!"),
        Arguments.of("%E6%96%91%E9%BB%9E"),
        Arguments.of("斑點")
    );
}
// exists() returns true once the directory has been created.
// BUG FIX: orphaned @ParameterizedTest/@MethodSource("getNonEncodedFileNameSupplier")
// annotations (their test method is missing from this copy) were attached to the no-arg
// exists(), which cannot resolve String parameters and would fail at runtime; removed them.
@Test
public void exists() {
    primaryDirectoryClient.create();
    assertTrue(primaryDirectoryClient.exists());
}
// exists() returns false for a directory that was never created.
@Test
public void doesNotExist() {
    boolean present = primaryDirectoryClient.exists();
    assertFalse(present);
}
// exists() surfaces an authentication failure (403) when the client holds a bogus SAS token.
// CLEANUP: the original invoked assertThrows twice with identical arguments and discarded the
// first result (an extra service round-trip); the duplicate call has been removed.
@Test
public void existsError() {
    primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath)
        .sasToken("sig=dummyToken").buildDirectoryClient();
    ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.exists());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 403, ShareErrorCode.AUTHENTICATION_FAILED);
}
// Creating the directory returns 201 Created.
@Test
public void createDirectory() {
    assertEquals(201, primaryDirectoryClient.createWithResponse(null, null, null, null, null).getStatusCode());
}
// Creating a directory in a nonexistent share fails with 404 SHARE_NOT_FOUND.
@Test
public void createDirectoryError() {
    String testShareName = generateShareName();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> directoryBuilderHelper(testShareName, directoryPath).buildDirectoryClient().create());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
// Creating the directory with metadata returns 201 Created.
@Test
public void createDirectoryWithMetadata() {
    assertEquals(201, primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null)
        .getStatusCode());
}
// Creating with an inline SDDL permission returns 201 and the service fills in every SMB property.
@Test
public void createDirectoryWithFilePermission() {
    Response<ShareDirectoryInfo> resp =
        primaryDirectoryClient.createWithResponse(null, FILE_PERMISSION, null, null, null);
    assertEquals(201, resp.getStatusCode());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Creating with a pre-created permission key (via SMB properties) returns 201 and populated
// SMB properties.
@Test
public void createDirectoryWithFilePermissionKey() {
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    Response<ShareDirectoryInfo> resp =
        primaryDirectoryClient.createWithResponse(smbProperties, null, null, null, null);
    assertEquals(201, resp.getStatusCode());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Creating with explicit NTFS attributes (HIDDEN | DIRECTORY) plus a permission key returns
// 201 and populated SMB properties.
@Test
public void createDirectoryWithNtfsAttributes() {
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> attributes =
        EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey)
        .setNtfsFileAttributes(attributes);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createWithResponse(smbProperties, null, null, null,
        null);
    assertEquals(201, resp.getStatusCode());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// A file change time supplied at creation must round-trip (within the service's precision).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void createChangeTime() {
    OffsetDateTime changeTime = testResourceNamer.now();
    primaryDirectoryClient.createWithResponse(new FileSmbProperties().setFileChangeTime(changeTime), null, null,
        null, null);
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(
        primaryDirectoryClient.getProperties().getSmbProperties().getFileChangeTime(), changeTime));
}
// Supplying both a permission key and an inline permission (or an oversized permission) is a
// client-side IllegalArgumentException, not a service error.
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
    FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    assertThrows(IllegalArgumentException.class, () ->
        primaryDirectoryClient.createWithResponse(properties, permission, null, null, null));
}
// Invalid combinations: key + inline permission together, and a permission over the 8 KiB limit.
private static Stream<Arguments> permissionAndKeySupplier() {
    return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION),
        Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB))));
}
// With allowTrailingDot the dot-suffixed name is preserved server-side; without it the
// trailing dot is stripped. Verified via a root-directory listing.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void createTrailingDot(boolean allowTrailingDot) {
    ShareClient shareClient = getShareClient(shareName, allowTrailingDot, null);
    ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
    String dirName = generatePathName();
    String dirNameWithDot = dirName + ".";
    ShareDirectoryClient dirClient = shareClient.getDirectoryClient(dirNameWithDot);
    dirClient.create();
    List<String> foundDirectories = new ArrayList<>();
    for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) {
        foundDirectories.add(fileRef.getName());
    }
    assertEquals(1, foundDirectories.size());
    if (allowTrailingDot) {
        assertEquals(dirNameWithDot, foundDirectories.get(0));
    } else {
        assertEquals(dirName, foundDirectories.get(0));
    }
}
// Creating a directory through an OAuth client works and the returned info matches the
// response ETag header.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void createDirectoryOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    Response<ShareDirectoryInfo> result = dirClient.createWithResponse(null, null, null, null, null);
    assertEquals(shareName, dirClient.getShareName());
    assertEquals(dirName, dirClient.getDirectoryPath());
    assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG));
}
// Minimal overload: createIfNotExists on a fresh directory yields a non-null result.
@Test
public void createIfNotExistsDirectoryMin() {
    ShareDirectoryInfo info = primaryDirectoryClient.createIfNotExists();
    assertNotNull(info);
}
// createIfNotExists on a fresh directory returns 201 Created.
@Test
public void createIfNotExistsDirectory() {
    assertEquals(201, primaryDirectoryClient
        .createIfNotExistsWithResponse(new ShareDirectoryCreateOptions(), null, null).getStatusCode());
}
// createIfNotExists in a nonexistent share still fails with 404 SHARE_NOT_FOUND.
@Test
public void createIfNotExistsDirectoryError() {
    String testShareName = generateShareName();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> directoryBuilderHelper(testShareName, directoryPath).buildDirectoryClient().createIfNotExists());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
// First createIfNotExists returns 201; the second returns 409 without throwing.
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions();
    ShareDirectoryClient primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    Response<ShareDirectoryInfo> initialResponse =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    Response<ShareDirectoryInfo> secondResponse =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
    FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
// createIfNotExists with metadata returns 201 Created.
@Test
public void createIfNotExistsDirectoryWithMetadata() {
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setMetadata(testMetadata);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createIfNotExistsWithResponse(options,
        null, null), 201);
}
// createIfNotExists with an inline SDDL permission returns 201 and populated SMB properties.
@Test
public void createIfNotExistsDirectoryWithFilePermission() {
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 201);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// createIfNotExists with a pre-created permission key returns 201 and populated SMB properties.
@Test
public void createIfNotExistsDirectoryWithFilePermissionKey() {
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 201);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// createIfNotExists with explicit NTFS attributes plus a permission key returns 201 and
// populated SMB properties.
@Test
public void createIfNotExistsDirectoryWithNtfsAttributes() {
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> attributes = EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey)
        .setNtfsFileAttributes(attributes);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 201);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Same client-side validation as createWithResponse: permission key + inline permission (or an
// oversized permission) throws IllegalArgumentException before any request is sent.
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createIfNotExistsDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
    FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions()
        .setSmbProperties(properties)
        .setFilePermission(permission);
    assertThrows(IllegalArgumentException.class, () ->
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null));
}
// Deleting an existing directory returns 202 Accepted.
@Test
public void deleteDirectory() {
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteWithResponse(null, null), 202);
}
// With allowTrailingDot, a dot-suffixed directory can be deleted (202 Accepted).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteTrailingDot() {
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(directoryClient.deleteWithResponse(null, null), 202);
}
// Deleting through an OAuth client returns 202 and echoes the client request id header.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteDirectoryOAuth() {
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    Response<Void> response = dirClient.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
    assertNotNull(response.getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID));
}
    @Test
    public void deleteDirectoryError() {
        // Deleting a directory that was never created should fail with 404 ResourceNotFound.
        ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.delete());
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    @Test
    public void deleteIfExistsDirectory() {
        // deleteIfExists on an existing directory behaves like delete: 202 Accepted.
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteIfExistsWithResponse(null, null),
            202);
    }
    @Test
    public void deleteIfExistsDirectoryMin() {
        // Minimal overload returns true when the directory existed and was deleted.
        primaryDirectoryClient.create();
        assertTrue(primaryDirectoryClient.deleteIfExists());
    }
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
Response<Boolean> response = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertFalse(primaryDirectoryClient.exists());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
primaryDirectoryClient.create();
Response<Boolean> initialResponse = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
Response<Boolean> secondResponse = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
assertEquals(202, initialResponse.getStatusCode());
assertEquals(404, secondResponse.getStatusCode());
assertTrue(initialResponse.getValue());
assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
primaryDirectoryClient.create();
Response<ShareDirectoryProperties> resp = primaryDirectoryClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
shareClient = getShareClient(shareName, true, null);
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
ShareDirectoryInfo createResponse = directoryClient.createIfNotExists();
Response<ShareDirectoryProperties> propertiesResponse = directoryClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(propertiesResponse, 200);
assertEquals(createResponse.getETag(), propertiesResponse.getValue().getETag());
assertEquals(createResponse.getLastModified(), propertiesResponse.getValue().getLastModified());
FileSmbProperties createSmbProperties = createResponse.getSmbProperties();
FileSmbProperties getPropertiesSmbProperties = propertiesResponse.getValue().getSmbProperties();
assertEquals(createSmbProperties.getFilePermissionKey(), getPropertiesSmbProperties.getFilePermissionKey());
assertEquals(createSmbProperties.getNtfsFileAttributes(), getPropertiesSmbProperties.getNtfsFileAttributes());
assertEquals(createSmbProperties.getFileLastWriteTime(), getPropertiesSmbProperties.getFileLastWriteTime());
assertEquals(createSmbProperties.getFileCreationTime(), getPropertiesSmbProperties.getFileCreationTime());
assertEquals(createSmbProperties.getFileChangeTime(), getPropertiesSmbProperties.getFileChangeTime());
assertEquals(createSmbProperties.getParentId(), getPropertiesSmbProperties.getParentId());
assertEquals(createSmbProperties.getFileId(), getPropertiesSmbProperties.getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
ShareDirectoryInfo createInfo = dirClient.create();
ShareDirectoryProperties properties = dirClient.getProperties();
assertEquals(createInfo.getETag(), properties.getETag());
assertEquals(createInfo.getLastModified(), properties.getLastModified());
assertEquals(createInfo.getSmbProperties().getFilePermissionKey(),
properties.getSmbProperties().getFilePermissionKey());
assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(),
properties.getSmbProperties().getNtfsFileAttributes());
assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(),
properties.getSmbProperties().getFileLastWriteTime());
assertEquals(createInfo.getSmbProperties().getFileCreationTime(),
properties.getSmbProperties().getFileCreationTime());
assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
properties.getSmbProperties().getFileChangeTime());
assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId());
assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId());
}
    @Test
    public void getPropertiesError() {
        // getProperties on a directory that was never created should fail with 404 ResourceNotFound.
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.getPropertiesWithResponse(null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
@Test
public void setPropertiesFilePermission() {
primaryDirectoryClient.create();
Response<ShareDirectoryInfo> resp = primaryDirectoryClient.setPropertiesWithResponse(null, FILE_PERMISSION,
null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void setPropertiesFilePermissionKey() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
primaryDirectoryClient.create();
Response<ShareDirectoryInfo> resp = primaryDirectoryClient.setPropertiesWithResponse(smbProperties, null, null,
null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void setHttpHeadersChangeTime() {
        // Setting only the file change time should round-trip through getProperties
        // (compared with precision tolerance for service timestamp rounding).
        primaryDirectoryClient.create();
        String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
        OffsetDateTime changeTime = testResourceNamer.now();
        // NOTE(review): the shared `smbProperties` field is mutated here but a fresh
        // FileSmbProperties is passed to setProperties below — confirm this mutation is intentional.
        smbProperties.setFileChangeTime(testResourceNamer.now())
            .setFilePermissionKey(filePermissionKey);
        primaryDirectoryClient.setProperties(new FileSmbProperties().setFileChangeTime(changeTime), null);
        FileShareTestHelper.compareDatesWithPrecision(primaryDirectoryClient.getProperties().getSmbProperties()
            .getFileChangeTime(), changeTime);
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setHttpHeadersTrailingDot() {
shareClient = getShareClient(shareName, true, null);
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
directoryClient.createIfNotExists();
Response<ShareDirectoryInfo> res = directoryClient.setPropertiesWithResponse(new FileSmbProperties(), null,
null, null);
FileShareTestHelper.assertResponseStatusCode(res, 200);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
Response<ShareDirectoryInfo> res = dirClient.setPropertiesWithResponse(new FileSmbProperties(), null, null,
null);
FileShareTestHelper.assertResponseStatusCode(res, 200);
}
    @ParameterizedTest
    @MethodSource("permissionAndKeySupplier")
    public void setPropertiesError(String filePermissionKey, String permission) {
        // Passing both a permission key and a raw permission to setProperties is
        // rejected client-side with IllegalArgumentException.
        FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
        primaryDirectoryClient.create();
        assertThrows(IllegalArgumentException.class, () ->
            primaryDirectoryClient.setPropertiesWithResponse(properties, permission, null, null));
    }
@Test
public void setMetadata() {
primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareDirectoryProperties getPropertiesBefore = primaryDirectoryClient.getProperties();
Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
primaryDirectoryClient.setMetadataWithResponse(updatedMetadata, null, null);
ShareDirectoryProperties getPropertiesAfter = primaryDirectoryClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setMetadataTrailingDot() {
shareClient = getShareClient(shareName, true, null);
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
directoryClient.createWithResponse(null, null, testMetadata, null, null);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareDirectoryProperties getPropertiesBefore = directoryClient.getProperties();
Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
directoryClient.setMetadataWithResponse(updatedMetadata, null, null);
ShareDirectoryProperties getPropertiesAfter = directoryClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setMetadataOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.createWithResponse(null, null, testMetadata, null, null);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareDirectoryProperties getPropertiesBefore = dirClient.getProperties();
Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
dirClient.setMetadataWithResponse(updatedMetadata, null, null);
ShareDirectoryProperties getPropertiesAfter = dirClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
    @Test
    public void setMetadataError() {
        // An empty metadata key is rejected by the service with 400 EmptyMetadataKey.
        primaryDirectoryClient.create();
        Map<String, String> errorMetadata = Collections.singletonMap("", "value");
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.setMetadata(errorMetadata));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
    }
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listFilesAndDirectories(String[] expectedFiles, String[] expectedDirectories) {
primaryDirectoryClient.create();
for (String expectedFile : expectedFiles) {
primaryDirectoryClient.createFile(expectedFile, 2);
}
for (String expectedDirectory : expectedDirectories) {
primaryDirectoryClient.createSubdirectory(expectedDirectory);
}
List<String> foundFiles = new ArrayList<>();
List<String> foundDirectories = new ArrayList<>();
for (ShareFileItem fileRef : primaryDirectoryClient.listFilesAndDirectories()) {
if (fileRef.isDirectory()) {
foundDirectories.add(fileRef.getName());
} else {
foundFiles.add(fileRef.getName());
}
}
assertArrayEquals(expectedFiles, foundFiles.toArray());
assertArrayEquals(expectedDirectories, foundDirectories.toArray());
}
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
    @ParameterizedTest
    @MethodSource("listFilesAndDirectoriesArgsSupplier")
    public void listFilesAndDirectoriesArgs(String extraPrefix, Integer maxResults, int numOfResults) {
        // Verifies prefix filtering and maxResults paging: only the direct children of the
        // parent directory (two subdirectories plus one file) should be listed, regardless of
        // the files nested inside the subdirectories.
        primaryDirectoryClient.create();
        List<String> nameList = new ArrayList<>();
        String dirPrefix = generatePathName();
        for (int i = 0; i < 2; i++) {
            ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
            subDirClient.create();
            for (int j = 0; j < 2; j++) {
                // Nested file names use indices 3..6 so they never collide with the
                // direct children named dirPrefix+0..2.
                int num = i * 2 + j + 3;
                subDirClient.createFile(dirPrefix + num, 1024);
            }
        }
        primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
        for (int i = 0; i < 3; i++) {
            nameList.add(dirPrefix + i);
        }
        Iterator<ShareFileItem> fileRefIter = primaryDirectoryClient
            .listFilesAndDirectories(prefix + extraPrefix, maxResults, null, null).iterator();
        // Expect exactly numOfResults entries, in name order, then exhaustion.
        for (int i = 0; i < numOfResults; i++) {
            assertEquals(nameList.get(i), fileRefIter.next().getName());
        }
        assertFalse(fileRefIter.hasNext());
    }
    // Arguments: extraPrefix appended to the test prefix, maxResults per page (null = default),
    // and the expected total number of results.
    private static Stream<Arguments> listFilesAndDirectoriesArgsSupplier() {
        return Stream.of(
            Arguments.of("", null, 3),
            Arguments.of("", 1, 3),
            Arguments.of("noOp", 3, 0));
    }
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
    @ParameterizedTest
    @CsvSource(value = {"false,false,false,false", "true,false,false,false", "false,true,false,false",
        "false,false,true,false", "false,false,false,true", "true,true,true,true"})
    public void listFilesAndDirectoriesExtendedInfoArgs(boolean timestamps, boolean etag, boolean attributes,
        boolean permissionKey) {
        // Listing with each combination of extended-info flags should still return the same
        // set of direct children, in the same order.
        primaryDirectoryClient.create();
        List<String> nameList = new ArrayList<>();
        String dirPrefix = generatePathName();
        for (int i = 0; i < 2; i++) {
            ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
            subDirClient.create();
            for (int j = 0; j < 2; j++) {
                // Nested file indices 3..6 avoid colliding with the direct children dirPrefix+0..2.
                int num = i * 2 + j + 3;
                subDirClient.createFile(dirPrefix + num, 1024);
            }
        }
        primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
        for (int i = 0; i < 3; i++) {
            nameList.add(dirPrefix + i);
        }
        ShareListFilesAndDirectoriesOptions options = new ShareListFilesAndDirectoriesOptions()
            .setPrefix(prefix)
            .setIncludeExtendedInfo(true)
            .setIncludeTimestamps(timestamps)
            .setIncludeETag(etag)
            .setIncludeAttributes(attributes)
            .setIncludePermissionKey(permissionKey);
        List<ShareFileItem> returnedFileList = primaryDirectoryClient.listFilesAndDirectories(options, null, null)
            .stream().collect(Collectors.toList());
        for (int i = 0; i < nameList.size(); i++) {
            assertEquals(nameList.get(i), returnedFileList.get(i).getName());
        }
    }
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
    public void listFilesAndDirectoriesExtendedInfoResults() {
        // With every extended-info flag enabled, both the file and the directory list items
        // should carry id, attributes, permission key, timestamps, and ETag.
        ShareDirectoryClient parentDir = primaryDirectoryClient;
        parentDir.create();
        ShareFileClient file = parentDir.createFile(generatePathName(), 1024);
        ShareDirectoryClient dir = parentDir.createSubdirectory(generatePathName());
        List<ShareFileItem> listResults = parentDir.listFilesAndDirectories(
            new ShareListFilesAndDirectoriesOptions()
                .setIncludeExtendedInfo(true)
                .setIncludeTimestamps(true)
                .setIncludePermissionKey(true)
                .setIncludeETag(true)
                .setIncludeAttributes(true),
            null, null)
            .stream().collect(Collectors.toList());
        // Listing order of the two items is not assumed; identify each by isDirectory().
        ShareFileItem dirListItem;
        ShareFileItem fileListItem;
        if (listResults.get(0).isDirectory()) {
            dirListItem = listResults.get(0);
            fileListItem = listResults.get(1);
        } else {
            dirListItem = listResults.get(1);
            fileListItem = listResults.get(0);
        }
        // Directory item: DIRECTORY attribute plus non-blank id/permission key/ETag and all timestamps.
        assertEquals(dirListItem.getName(), new File(dir.getDirectoryPath()).getName());
        assertTrue(dirListItem.isDirectory());
        assertNotNull(dirListItem.getId());
        assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getId()));
        assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), dirListItem.getFileAttributes());
        assertNotNull(dirListItem.getPermissionKey());
        assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getPermissionKey()));
        assertNotNull(dirListItem.getProperties().getCreatedOn());
        assertNotNull(dirListItem.getProperties().getLastAccessedOn());
        assertNotNull(dirListItem.getProperties().getLastWrittenOn());
        assertNotNull(dirListItem.getProperties().getChangedOn());
        assertNotNull(dirListItem.getProperties().getLastModified());
        assertNotNull(dirListItem.getProperties().getETag());
        assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getProperties().getETag()));
        // File item: ARCHIVE attribute plus the same populated extended-info fields.
        assertEquals(fileListItem.getName(), new File(file.getFilePath()).getName());
        assertFalse(fileListItem.isDirectory());
        assertNotNull(fileListItem.getId());
        assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getId()));
        assertEquals(EnumSet.of(NtfsFileAttributes.ARCHIVE), fileListItem.getFileAttributes());
        assertNotNull(fileListItem.getPermissionKey());
        assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getPermissionKey()));
        assertNotNull(fileListItem.getProperties().getCreatedOn());
        assertNotNull(fileListItem.getProperties().getLastAccessedOn());
        assertNotNull(fileListItem.getProperties().getLastWrittenOn());
        assertNotNull(fileListItem.getProperties().getChangedOn());
        assertNotNull(fileListItem.getProperties().getLastModified());
        assertNotNull(fileListItem.getProperties().getETag());
        assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getProperties().getETag()));
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncoded() {
String specialCharDirectoryName = "directory\uFFFE";
String specialCharFileName = "file\uFFFE";
primaryDirectoryClient.create();
primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
primaryDirectoryClient.createFile(specialCharFileName, 1024);
List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
.collect(Collectors.toList());
assertEquals(2, shareFileItems.size());
assertTrue(shareFileItems.get(0).isDirectory());
assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
assertFalse(shareFileItems.get(1).isDirectory());
assertEquals(specialCharFileName, shareFileItems.get(1).getName());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncodedContinuationToken() {
String specialCharFileName0 = "file0\uFFFE";
String specialCharFileName1 = "file1\uFFFE";
primaryDirectoryClient.create();
primaryDirectoryClient.createFile(specialCharFileName0, 1024);
primaryDirectoryClient.createFile(specialCharFileName1, 1024);
List<ShareFileItem> shareFileItems = new ArrayList<>();
for (PagedResponse<ShareFileItem> page : primaryDirectoryClient.listFilesAndDirectories().iterableByPage(1)) {
shareFileItems.addAll(page.getValue());
}
assertEquals(specialCharFileName0, shareFileItems.get(0).getName());
assertEquals(specialCharFileName1, shareFileItems.get(1).getName());
}
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
    public void listFilesAndDirectoriesEncodedPrefix() {
        // Lists a directory whose name contains U+FFFE and verifies the decoded name.
        // NOTE(review): despite the method name, no prefix option is passed to the listing
        // call — confirm whether a prefix filter was intended here.
        String specialCharDirectoryName = "directory\uFFFE";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
        List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
            .collect(Collectors.toList());
        assertEquals(1, shareFileItems.size());
        assertTrue(shareFileItems.get(0).isDirectory());
        assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void testListFilesAndDirectoriesOAuth() {
ShareDirectoryClient dirClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP))
.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
List<String> fileNames = new ArrayList<>();
List<String> dirNames = new ArrayList<>();
for (int i = 0; i < 11; i++) {
fileNames.add(generatePathName());
}
for (int i = 0; i < 5; i++) {
dirNames.add(generatePathName());
}
for (String file : fileNames) {
dirClient.createFile(file, Constants.KB);
}
for (String directory : dirNames) {
dirClient.createSubdirectory(directory);
}
List<String> foundFiles = new ArrayList<>();
List<String> foundDirectories = new ArrayList<>();
for (ShareFileItem fileRef : dirClient.listFilesAndDirectories()) {
if (fileRef.isDirectory()) {
foundDirectories.add(fileRef.getName());
} else {
foundFiles.add(fileRef.getName());
}
}
assertTrue(fileNames.containsAll(foundFiles));
assertTrue(dirNames.containsAll(foundDirectories));
}
    @Test
    public void listMaxResultsByPage() {
        // With iterableByPage(1), every returned page must contain exactly one item.
        primaryDirectoryClient.create();
        String dirPrefix = generatePathName();
        for (int i = 0; i < 2; i++) {
            ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
            subDirClient.create();
            for (int j = 0; j < 2; j++) {
                int num = i * 2 + j + 3;
                subDirClient.createFile(dirPrefix + num, 1024);
            }
        }
        for (PagedResponse<ShareFileItem> page
            : primaryDirectoryClient.listFilesAndDirectories(prefix, null, null, null).iterableByPage(1)) {
            assertEquals(1, page.getValue().size());
        }
    }
@ParameterizedTest
@MethodSource("listHandlesSupplier")
public void listHandles(Integer maxResults, boolean recursive) {
primaryDirectoryClient.create();
List<HandleItem> handles = primaryDirectoryClient.listHandles(maxResults, recursive, null, null).stream()
.collect(Collectors.toList());
assertEquals(0, handles.size());
}
    // Arguments: maxResults per page (null = service default) and whether to recurse.
    private static Stream<Arguments> listHandlesSupplier() {
        return Stream.of(
            Arguments.of(2, true),
            Arguments.of(null, false));
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void listHandlesTrailingDot() {
shareClient = getShareClient(shareName, true, null);
String directoryName = generatePathName() + ".";
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(directoryName);
directoryClient.create();
List<HandleItem> handles = directoryClient.listHandles(null, false, null, null).stream()
.collect(Collectors.toList());
assertEquals(0, handles.size());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void listHandlesOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
List<HandleItem> handles = dirClient.listHandles(2, true, null, null).stream().collect(Collectors.toList());
assertEquals(0, handles.size());
}
    @Test
    public void listHandlesError() {
        // listHandles on a nonexistent directory fails lazily on first iteration with 404.
        Exception e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.listHandles(null, true, null, null).iterator().hasNext());
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
public void forceCloseHandleMin() {
primaryDirectoryClient.create();
CloseHandlesInfo handlesClosedInfo = primaryDirectoryClient.forceCloseHandle("1");
assertEquals(0, handlesClosedInfo.getClosedHandles());
assertEquals(0, handlesClosedInfo.getFailedHandles());
}
    @Test
    public void forceCloseHandleInvalidHandleId() {
        // A malformed handle id is rejected by the service with a ShareStorageException.
        primaryDirectoryClient.create();
        assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.forceCloseHandle("invalidHandleId"));
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void forceCloseHandleOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
CloseHandlesInfo handlesClosedInfo = dirClient.forceCloseHandle("1");
assertEquals(0, handlesClosedInfo.getClosedHandles());
assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
public void forceCloseAllHandlesMin() {
primaryDirectoryClient.create();
CloseHandlesInfo handlesClosedInfo = primaryDirectoryClient.forceCloseAllHandles(false, null, null);
assertEquals(0, handlesClosedInfo.getClosedHandles());
assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void forceCloseAllHandlesTrailingDot() {
shareClient = getShareClient(shareName, true, null);
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
directoryClient.create();
CloseHandlesInfo handlesClosedInfo = directoryClient.forceCloseAllHandles(false, null, null);
assertEquals(0, handlesClosedInfo.getClosedHandles());
assertEquals(0, handlesClosedInfo.getFailedHandles());
}
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameMin() {
        // The minimal rename overload should complete without throwing.
        primaryDirectoryClient.create();
        assertDoesNotThrow(() -> primaryDirectoryClient.rename(generatePathName()));
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameWithResponse() {
primaryDirectoryClient.create();
Response<ShareDirectoryClient> resp = primaryDirectoryClient.renameWithResponse(
new ShareFileRenameOptions(generatePathName()), null, null);
ShareDirectoryClient renamedClient = resp.getValue();
assertDoesNotThrow(renamedClient::getProperties);
assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.getProperties());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDifferentDirectory() {
primaryDirectoryClient.create();
ShareDirectoryClient destinationClient = shareClient.getDirectoryClient(generatePathName());
destinationClient.create();
String destinationPath = destinationClient.getFileClient(generatePathName()).getFilePath();
ShareDirectoryClient resultClient = primaryDirectoryClient.rename(destinationPath);
assertTrue(resultClient.exists());
assertFalse(primaryDirectoryClient.exists());
assertEquals(destinationPath, resultClient.getDirectoryPath());
}
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void renameReplaceIfExists(boolean replaceIfExists) {
        // Renaming onto an existing file succeeds only when replaceIfExists is true;
        // otherwise the service rejects it with a ShareStorageException.
        primaryDirectoryClient.create();
        ShareFileClient destination = shareClient.getFileClient(generatePathName());
        destination.create(512L);
        boolean exception = false;
        try {
            primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath())
                .setReplaceIfExists(replaceIfExists), null, null);
        } catch (ShareStorageException ignored) {
            exception = true;
        }
        // Success and replaceIfExists must agree: replace=true -> no exception, and vice versa.
        assertEquals(replaceIfExists, !exception);
    }
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void renameIgnoreReadOnly(boolean ignoreReadOnly) {
        // Replacing a READ_ONLY destination requires ignoreReadOnly=true even when
        // replaceIfExists is set; without it the service throws.
        primaryDirectoryClient.create();
        FileSmbProperties props = new FileSmbProperties().setNtfsFileAttributes(
            EnumSet.of(NtfsFileAttributes.READ_ONLY));
        ShareFileClient destinationFile = shareClient.getFileClient(generatePathName());
        destinationFile.createWithResponse(512L, null, props, null, null, null, null, null);
        ShareFileRenameOptions options = new ShareFileRenameOptions(destinationFile.getFilePath())
            .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true);
        boolean exception = false;
        try {
            primaryDirectoryClient.renameWithResponse(options, null, null);
        } catch (ShareStorageException ignored) {
            exception = true;
        }
        // An exception occurs exactly when the read-only attribute is NOT ignored.
        assertEquals(!ignoreReadOnly, exception);
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFilePermission() {
primaryDirectoryClient.create();
String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
.setFilePermission(filePermission);
ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFilePermissionAndKeySet() {
primaryDirectoryClient.create();
String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
.setFilePermission(filePermission)
.setSmbProperties(new FileSmbProperties()
.setFilePermissionKey("filePermissionkey"));
assertThrows(ShareStorageException.class, () ->
primaryDirectoryClient.renameWithResponse(options, null, null).getValue());
}
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameFileSmbProperties() {
        // SMB properties supplied on rename (attributes, timestamps, permission key)
        // should be applied to the destination directory.
        primaryDirectoryClient.create();
        String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
        String permissionKey = shareClient.createPermission(filePermission);
        FileSmbProperties smbProperties = new FileSmbProperties()
            .setFilePermissionKey(permissionKey)
            .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.DIRECTORY))
            .setFileCreationTime(testResourceNamer.now().minusDays(5))
            .setFileLastWriteTime(testResourceNamer.now().minusYears(2))
            .setFileChangeTime(testResourceNamer.now());
        ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName()).setSmbProperties(smbProperties);
        ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
        FileSmbProperties destSmbProperties = destClient.getProperties().getSmbProperties();
        assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), destSmbProperties.getNtfsFileAttributes());
        assertNotNull(destSmbProperties.getFileCreationTime());
        assertNotNull(destSmbProperties.getFileLastWriteTime());
        // Change time is compared with precision tolerance for service timestamp rounding.
        FileShareTestHelper.compareDatesWithPrecision(destSmbProperties.getFileChangeTime(), testResourceNamer.now());
    }
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameMetadata() {
primaryDirectoryClient.create();
String key = "update";
String value = "value";
Map<String, String> updatedMetadata = Collections.singletonMap(key, value);
ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
.setMetadata(updatedMetadata);
ShareDirectoryClient renamedClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
ShareDirectoryProperties properties = renamedClient.getProperties();
assertNotNull(properties.getMetadata().get(key));
assertEquals(value, renamedClient.getProperties().getMetadata().get(key));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
String dirRename = generatePathName();
ShareFileRenameOptions options = new ShareFileRenameOptions(dirRename);
ShareDirectoryClient renamedClient = dirClient.renameWithResponse(options, null, null).getValue();
assertDoesNotThrow(renamedClient::getProperties);
assertEquals(dirRename, renamedClient.getDirectoryPath());
assertThrows(ShareStorageException.class, dirClient::getProperties);
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameError() {
primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.rename(generatePathName()));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDestAC() {
primaryDirectoryClient.create();
String pathName = generatePathName();
ShareFileClient destFile = shareClient.getFileClient(pathName);
destFile.create(512);
String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID);
ShareRequestConditions src = new ShareRequestConditions().setLeaseId(leaseID);
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.renameWithResponse(
new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null,
null), 200);
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDestACFail() {
primaryDirectoryClient.create();
String pathName = generatePathName();
ShareFileClient destFile = shareClient.getFileClient(pathName);
destFile.create(512);
setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID);
ShareRequestConditions src = new ShareRequestConditions().setLeaseId(GARBAGE_LEASE_ID);
assertThrows(RuntimeException.class,
() -> primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(pathName)
.setDestinationRequestConditions(src).setReplaceIfExists(true), null, null));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12")
public void testRenameSASToken() {
ShareFileSasPermission permissions = new ShareFileSasPermission()
.setReadPermission(true)
.setWritePermission(true)
.setCreatePermission(true)
.setDeletePermission(true);
OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
String sas = shareClient.generateSas(sasValues);
ShareDirectoryClient client = getDirectoryClient(sas, primaryDirectoryClient.getDirectoryUrl());
primaryDirectoryClient.create();
String directoryName = generatePathName();
ShareDirectoryClient destClient = client.rename(directoryName);
assertNotNull(destClient);
destClient.getProperties();
assertEquals(directoryName, destClient.getDirectoryPath());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void renameTrailingDot() {
shareClient = getShareClient(shareName, true, true);
String directoryName = generatePathName() + ".";
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(directoryName);
directoryClient.create();
assertDoesNotThrow(() -> directoryClient.rename(directoryName));
}
@Test
public void createSubDirectory() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryWithResponse(
"testCreateSubDirectory", null, null, null, null, null), 201);
}
@Test
public void createSubDirectoryInvalidName() {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createSubdirectory("test/subdirectory"));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.PARENT_NOT_FOUND);
}
@Test
public void createSubDirectoryMetadata() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryWithResponse(
"testCreateSubDirectory", null, null, testMetadata, null, null), 201);
}
@Test
public void createSubDirectoryMetadataError() {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createSubdirectoryWithResponse("testsubdirectory", null, null,
Collections.singletonMap("", "value"), null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@Test
public void createSubDirectoryFilePermission() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(
primaryDirectoryClient.createSubdirectoryWithResponse("testCreateSubDirectory",
null, FILE_PERMISSION, null, null, null), 201);
}
@Test
public void createSubDirectoryFilePermissionKey() {
primaryDirectoryClient.create();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
FileShareTestHelper.assertResponseStatusCode(
primaryDirectoryClient.createSubdirectoryWithResponse("testCreateSubDirectory", smbProperties, null, null,
null, null), 201);
}
@Test
public void createIfNotExistsSubDirectory() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
"testCreateSubDirectory", new ShareDirectoryCreateOptions(), null, null), 201);
}
@Test
public void createIfNotExistsSubDirectoryAlreadyExists() {
String subdirectoryName = generatePathName();
primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
primaryDirectoryClient.create();
int initialResponseCode = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
subdirectoryName,
new ShareDirectoryCreateOptions(),
null, null)
.getStatusCode();
int secondResponseCode = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
subdirectoryName,
new ShareDirectoryCreateOptions(),
null, null)
.getStatusCode();
assertEquals(201, initialResponseCode);
assertEquals(409, secondResponseCode);
}
@Test
public void createIfNotExistsSubDirectoryInvalidName() {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createSubdirectoryIfNotExists("test/subdirectory"));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.PARENT_NOT_FOUND);
}
@Test
public void createIfNotExistsSubDirectoryMetadata() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
"testCreateSubDirectory", new ShareDirectoryCreateOptions().setMetadata(testMetadata), null, null), 201);
}
@Test
public void createIfNotExistsSubDirectoryMetadataError() {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
"testsubdirectory",
new ShareDirectoryCreateOptions()
.setMetadata(Collections.singletonMap("", "value")),
null,
null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@Test
public void createIfNotExistsSubDirectoryFilePermission() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
"testCreateSubDirectory", new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION), null, null),
201);
}
@Test
public void testCreateIfNotExistsSubDirectoryFilePermissionKey() {
primaryDirectoryClient.create();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
"testCreateSubDirectory", new ShareDirectoryCreateOptions().setSmbProperties(smbProperties), null, null),
201);
}
@Test
public void testDeleteSubDirectory() {
String subDirectoryName = "testSubCreateDirectory";
primaryDirectoryClient.create();
primaryDirectoryClient.createSubdirectory(subDirectoryName);
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteSubdirectoryWithResponse(
subDirectoryName, null, null), 202);
}
@Test
public void deleteSubDirectoryError() {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.deleteSubdirectory("testsubdirectory"));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsSubDirectory() {
String subDirectoryName = "testSubCreateDirectory";
primaryDirectoryClient.create();
primaryDirectoryClient.createSubdirectory(subDirectoryName);
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient
.deleteSubdirectoryIfExistsWithResponse(subDirectoryName, null, null), 202);
}
@Test
public void deleteIfExistsSubDirectoryMin() {
String subDirectoryName = "testSubCreateDirectory";
primaryDirectoryClient.create();
primaryDirectoryClient.createSubdirectory(subDirectoryName);
assertTrue(primaryDirectoryClient.deleteSubdirectoryIfExists(subDirectoryName));
}
@Test
public void deleteIfExistsSubDirectoryThatDoesNotExist() {
primaryDirectoryClient.create();
Response<Boolean> response = primaryDirectoryClient.deleteSubdirectoryIfExistsWithResponse("testsubdirectory",
null, null);
assertEquals(404, response.getStatusCode());
assertFalse(response.getValue());
}
@Test
public void createFile() {
primaryDirectoryClient.create();
FileShareTestHelper.assertResponseStatusCode(
primaryDirectoryClient.createFileWithResponse("testCreateFile", 1024, null, null, null, null, null, null),
201);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, null, null, null, null, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, statusCode, errMsg);
}
@Test
public void createFileMaxOverload() {
primaryDirectoryClient.create();
ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
FileShareTestHelper.assertResponseStatusCode(
primaryDirectoryClient.createFileWithResponse("testCreateFile", 1024, httpHeaders, smbProperties,
FILE_PERMISSION, testMetadata, null, null), 201);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null, metadata,
null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, errMsg);
}
@Test
public void deleteFile() {
String fileName = "testCreateFile";
primaryDirectoryClient.create();
primaryDirectoryClient.createFile(fileName, 1024);
FileShareTestHelper.assertResponseStatusCode(
primaryDirectoryClient.deleteFileWithResponse(fileName, null, null), 202);
}
@Test
public void deleteFileError() {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.deleteFileWithResponse("testfile", null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsFileMin() {
String fileName = "testCreateFile";
primaryDirectoryClient.create();
primaryDirectoryClient.createFile(fileName, 1024);
assertTrue(primaryDirectoryClient.deleteFileIfExists(fileName));
}
@Test
public void deleteIfExistsFile() {
String fileName = "testCreateFile";
primaryDirectoryClient.create();
primaryDirectoryClient.createFile(fileName, 1024);
FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteFileIfExistsWithResponse(fileName,
null, null), 202);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
primaryDirectoryClient.create();
Response<Boolean> response = primaryDirectoryClient.deleteFileIfExistsWithResponse("testfile", null, null);
assertEquals(404, response.getStatusCode());
assertFalse(response.getValue());
}
@Test
public void getSnapshotId() {
String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
ShareDirectoryClient shareSnapshotClient = directoryBuilderHelper(shareName, directoryPath).snapshot(snapshot)
.buildDirectoryClient();
assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
@Test
public void getShareName() {
assertEquals(shareName, primaryDirectoryClient.getShareName());
}
@Test
public void getDirectoryPath() {
assertEquals(directoryPath, primaryDirectoryClient.getDirectoryPath());
}
@Test
public void testPerCallPolicy() {
primaryDirectoryClient.create();
ShareDirectoryClient directoryClient = directoryBuilderHelper(primaryDirectoryClient.getShareName(),
primaryDirectoryClient.getDirectoryPath())
.addPolicy(getPerCallVersionPolicy()).buildDirectoryClient();
Response<ShareDirectoryProperties> response = directoryClient.getPropertiesWithResponse(null, null);
assertDoesNotThrow(() -> response.getHeaders().getValue("x-ms-version").equals("2017-11-09"));
}
@ParameterizedTest
@ValueSource(strings = {"", "/"})
public void rootDirectorySupport(String rootDirPath) {
String dir1Name = "dir1";
String dir2Name = "dir2";
shareClient.createDirectory(dir1Name).createSubdirectory(dir2Name);
ShareDirectoryClient rootDirectory = shareClient.getDirectoryClient(rootDirPath);
assertTrue(rootDirectory.exists());
assertTrue(rootDirectory.getSubdirectoryClient(dir1Name).exists());
}
@Test
public void createShareWithSmallTimeoutsFailForServiceClient() {
int maxRetries = 5;
long retryDelayMillis = 1000;
for (int i = 0; i < maxRetries; i++) {
try {
HttpClientOptions clientOptions = new HttpClientOptions()
.setApplicationId("client-options-id")
.setResponseTimeout(Duration.ofNanos(1))
.setReadTimeout(Duration.ofNanos(1))
.setWriteTimeout(Duration.ofNanos(1))
.setConnectTimeout(Duration.ofNanos(1));
ShareServiceClientBuilder clientBuilder = new ShareServiceClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.retryOptions(new RequestRetryOptions(null, 1, (Integer) null, null, null, null))
.clientOptions(clientOptions);
ShareServiceClient serviceClient = clientBuilder.buildClient();
assertThrows(RuntimeException.class, () -> serviceClient.createShareWithResponse(generateShareName(),
null, Duration.ofSeconds(10), null));
return;
} catch (Exception e) {
try {
Thread.sleep(retryDelayMillis);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}
}
@Test
public void defaultAudience() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(null) /* should default to "https:
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
@Test
public void storageAccountAudience() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience(primaryDirectoryClient.getAccountName())));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
@Test
public void audienceError() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience("badAudience")));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
ShareStorageException e = assertThrows(ShareStorageException.class, aadDirClient::exists);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
} |
made it verbose. | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | LOGGER.info("Token not found in the MSAL cache."); | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | class IdentitySyncClient extends IdentityClientBase {
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param clientSecret the client secret of the application.
* @param resourceId the resource ID of the application
* @param certificatePath the path to the PKCS12 or PEM certificate of the application.
* @param certificate the PKCS12 or PEM certificate of the application.
* @param certificatePassword the password protecting the PFX certificate.
* @param isSharedTokenCacheCredential Indicate whether the credential is
* {@link com.azure.identity.SharedTokenCacheCredential} or not.
* @param clientAssertionTimeout the timeout to use for the client assertion.
* @param options the options configuring the client.
*/
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
private String parseClientAssertion() {
if (clientAssertionFilePath != null) {
try {
byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(encoded, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} else {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
if (clientAssertionSupplier != null) {
builder.clientCredential(ClientCredentialFactory
.createFromClientAssertion(clientAssertionSupplier.get()));
}
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException | ExecutionException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? confidentialClientApplicationAccessorWithCae : confidentialClientApplicationAccessor;
}
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? publicClientApplicationAccessorWithCae : publicClientApplicationAccessor;
}
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
* Acquire a token from the confidential client.
*
* @param request the details of the token request
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
try {
IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
AccessToken accessToken = new MsalToken(authenticationResult);
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
} else {
throw new IllegalStateException("Received token is close to expiry.");
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquires a token from Active Directory with a device code challenge. Active Directory provides
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return the acquired {@link MsalToken} once the device challenge is met
 * @throws ClientAuthenticationException if acquisition fails or the device code expires
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
try {
// Blocks until the user completes (or fails) the device-code challenge.
return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
}
}
/**
 * Synchronously acquires a token from Active Directory by opening a browser and waiting for the user to log in.
 * The credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if interactive browser authentication fails
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
URI redirectUri;
String redirect;
// Precedence: explicit port > explicit redirect URL > default localhost.
if (port != null) {
redirect = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
redirect = redirectUrl;
} else {
redirect = HTTP_LOCALHOST;
}
try {
redirectUri = new URI(redirect);
} catch (URISyntaxException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
PublicClientApplication pc = getPublicClientInstance(request).getValue();
try {
// Blocks until the browser flow completes.
return new MsalToken(pc.acquireToken(builder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
"Failed to acquire token with Interactive Browser Authentication.", null, e));
}
}
/**
 * Synchronously acquires a token by shelling out to the Azure CLI ({@code az account get-access-token}).
 *
 * @param request the details of the token request
 * @return the access token reported by the Azure CLI
 * @throws CredentialUnavailableException if the Azure CLI is not installed or not logged in
 * @throws IllegalArgumentException if the requested scopes cannot be mapped to a resource
 */
public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
// Validate before the value is placed into a shell command line.
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
azCommand.append(scopes);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(azCommand);
} catch (RuntimeException e) {
// CredentialUnavailableException is logged at a lower severity so credential
// chains can fall through to the next credential.
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
/**
 * Synchronously acquires a token by shelling out to the Azure Developer CLI ({@code azd auth token}).
 *
 * @param request the details of the token request; must contain at least one scope
 * @return the access token reported by the Azure Developer CLI
 * @throws IllegalArgumentException if no scopes are present or a scope is malformed
 * @throws CredentialUnavailableException if the Azure Developer CLI is unavailable or not logged in
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // Idiom fix: isEmpty() instead of size() == 0.
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before it is placed into a shell command line.
    scopes.forEach(scope -> {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    });
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        // Preserve CredentialUnavailableException semantics for credential chains.
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquires a token using the On-Behalf-Of flow.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if the On-Behalf-Of exchange fails
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
try {
return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
}
}
/**
 * Exchanges the configured client assertion for an access token, synchronously.
 *
 * @param request the details of the token request
 * @return the exchanged access token
 * @throws UncheckedIOException if reading the client assertion fails
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        // Read the (cached) assertion and hand it to the shared exchange helper.
        return authenticateWithExchangeTokenHelper(request, clientAssertionAccessor.getValue());
    } catch (IOException ioException) {
        throw new UncheckedIOException(ioException);
    }
}
/**
 * Builds the MSAL app token provider used for workload identity: each invocation
 * performs a synchronous token exchange and wraps the result for MSAL.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return parameters -> {
        TokenRequestContext tokenRequestContext = new TokenRequestContext()
            .setScopes(new ArrayList<>(parameters.scopes))
            .setClaims(parameters.claims)
            .setTenantId(parameters.tenantId);
        AccessToken exchangedToken = authenticateWithExchangeTokenSync(tokenRequestContext);
        Supplier<TokenProviderResult> resultSupplier = () -> {
            TokenProviderResult providerResult = new TokenProviderResult();
            providerResult.setAccessToken(exchangedToken.getToken());
            providerResult.setTenantId(tokenRequestContext.getTenantId());
            providerResult.setExpiresInSeconds(exchangedToken.getExpiresAt().toEpochSecond());
            return providerResult;
        };
        // Run on the caller-supplied executor when one is configured.
        if (options.getExecutorService() != null) {
            return CompletableFuture.supplyAsync(resultSupplier, options.getExecutorService());
        }
        return CompletableFuture.supplyAsync(resultSupplier);
    };
}
/**
 * Acquires a token via the workload-identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws CredentialUnavailableException when acquisition fails
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication workloadClient =
        workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        ClientCredentialParameters parameters = ClientCredentialParameters
            .builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
            .build();
        return new MsalToken(workloadClient.acquireToken(parameters).get());
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Returns the options this identity client was configured with.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
// NOTE(review): returns null — presumably this reactive base-class hook is unused by
// the synchronous client; confirm callers tolerate a null Mono before relying on it.
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
return null;
}
}

class IdentitySyncClient extends IdentityClientBase {
// Lazily-initialized MSAL application instances. The *WithCae variants enable
// Continuous Access Evaluation; they are kept separate from the non-CAE instances.
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
// Confidential clients dedicated to managed-identity and workload-identity flows.
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Client assertion read from clientAssertionFilePath, cached with a refresh timeout.
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentitySyncClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to a file containing the client assertion token.
 * @param resourceId the resource ID of the application.
 * @param clientAssertionSupplier supplier that produces the client assertion token.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for refreshing the cached client assertion;
 * defaults to 5 minutes when {@code null}.
 * @param options the options configuring the client.
 */
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
// Each accessor defers MSAL client construction until first use; CAE and non-CAE
// variants are built separately so their token caches never mix.
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
// Re-read the assertion file every 5 minutes unless a custom timeout is supplied.
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
/**
 * Reads the client assertion token from the configured file path as UTF-8 text.
 *
 * @return the client assertion file contents
 * @throws IllegalStateException if no assertion file path was configured
 * @throws RuntimeException wrapping any I/O failure while reading the file
 */
private String parseClientAssertion() {
    // Fail fast when no assertion file was configured.
    if (clientAssertionFilePath == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "Client Assertion File Path is not provided."
                + " It should be provided to authenticate with client assertion."
        ));
    }
    try {
        byte[] fileBytes = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(fileBytes, StandardCharsets.UTF_8);
    } catch (IOException ioException) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ioException));
    }
}
/**
 * Synchronously acquires a token from Active Directory with the confidential client
 * (client secret, certificate, or client assertion).
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws RuntimeException if the MSAL acquisition is interrupted or fails
 */
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
// When a per-request assertion supplier is configured, it overrides the
// credential baked into the confidential client.
if (clientAssertionSupplier != null) {
builder.clientCredential(ClientCredentialFactory
.createFromClientAssertion(clientAssertionSupplier.get()));
}
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException | ExecutionException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/** Picks the CAE or non-CAE confidential client accessor for this request. */
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
/** Picks the CAE or non-CAE public client accessor for this request. */
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Acquires a token via the managed-identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws CredentialUnavailableException when managed identity is not available
 */
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication managedIdentityClient =
        managedIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        ClientCredentialParameters parameters = ClientCredentialParameters
            .builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
            .build();
        return new MsalToken(managedIdentityClient.acquireToken(parameters).get());
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Acquire a token from the confidential client's cache.
 *
 * @param request the details of the token request
 * @return a cached access token that is not close to expiry, or {@code null} if no token exists in the cache.
 * @throws IllegalStateException if the cached token is within the refresh offset of its expiry
 * @throws ClientAuthenticationException for other silent-acquisition failures
 */
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder =
        SilentParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.isCaeEnabled() && request.getClaims() != null) {
        // CAE claims challenge: forward claims and force refresh so MSAL does not
        // return the token that was just rejected.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult =
            confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        // Fix: guard against a null exception message before contains() (possible NPE).
        if (e.getMessage() != null && e.getMessage().contains("Token not found in the cache")) {
            LOGGER.verbose("Token not found in the MSAL cache.");
            return null;
        } else {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
        }
    }
}
/*
 * NOTE(review): orphaned Javadoc — it documented a removed method ("Acquire a token from
 * the currently logged in client", parameters: request, account). No such method follows;
 * remove this comment or reattach it to the method it belongs to.
 */
@SuppressWarnings("deprecation")
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquires a token from Active Directory with a device code challenge. Active Directory provides
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return the acquired {@link MsalToken} once the device challenge is met
 * @throws ClientAuthenticationException if acquisition fails or the device code expires
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
try {
// Blocks until the user completes (or fails) the device-code challenge.
return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
}
}
/**
 * Synchronously acquires a token from Active Directory by opening a browser and waiting for the user to log in.
 * The credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if interactive browser authentication fails
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
URI redirectUri;
String redirect;
// Precedence: explicit port > explicit redirect URL > default localhost.
if (port != null) {
redirect = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
redirect = redirectUrl;
} else {
redirect = HTTP_LOCALHOST;
}
try {
redirectUri = new URI(redirect);
} catch (URISyntaxException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
PublicClientApplication pc = getPublicClientInstance(request).getValue();
try {
// Blocks until the browser flow completes.
return new MsalToken(pc.acquireToken(builder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
"Failed to acquire token with Interactive Browser Authentication.", null, e));
}
}
/**
 * Synchronously acquires a token by shelling out to the Azure CLI ({@code az account get-access-token}).
 *
 * @param request the details of the token request
 * @return the access token reported by the Azure CLI
 * @throws CredentialUnavailableException if the Azure CLI is not installed or not logged in
 * @throws IllegalArgumentException if the requested scopes cannot be mapped to a resource
 */
public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
// Validate before the value is placed into a shell command line.
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
azCommand.append(scopes);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(azCommand);
} catch (RuntimeException e) {
// CredentialUnavailableException is logged at a lower severity so credential
// chains can fall through to the next credential.
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
/**
 * Synchronously acquires a token by shelling out to the Azure Developer CLI ({@code azd auth token}).
 *
 * @param request the details of the token request; must contain at least one scope
 * @return the access token reported by the Azure Developer CLI
 * @throws IllegalArgumentException if no scopes are present or a scope is malformed
 * @throws CredentialUnavailableException if the Azure Developer CLI is unavailable or not logged in
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // Idiom fix: isEmpty() instead of size() == 0.
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before it is placed into a shell command line.
    scopes.forEach(scope -> {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    });
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        // Preserve CredentialUnavailableException semantics for credential chains.
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquires a token using the On-Behalf-Of flow.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if the On-Behalf-Of exchange fails
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
try {
return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
}
}
/**
 * Exchanges the configured client assertion for an access token, synchronously.
 *
 * @param request the details of the token request
 * @return the exchanged access token
 * @throws UncheckedIOException if reading the client assertion fails
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        // Read the (cached) assertion and hand it to the shared exchange helper.
        return authenticateWithExchangeTokenHelper(request, clientAssertionAccessor.getValue());
    } catch (IOException ioException) {
        throw new UncheckedIOException(ioException);
    }
}
/**
 * Builds the MSAL app token provider used for workload identity: each invocation
 * performs a synchronous token exchange and wraps the result for MSAL.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return parameters -> {
        TokenRequestContext tokenRequestContext = new TokenRequestContext()
            .setScopes(new ArrayList<>(parameters.scopes))
            .setClaims(parameters.claims)
            .setTenantId(parameters.tenantId);
        AccessToken exchangedToken = authenticateWithExchangeTokenSync(tokenRequestContext);
        Supplier<TokenProviderResult> resultSupplier = () -> {
            TokenProviderResult providerResult = new TokenProviderResult();
            providerResult.setAccessToken(exchangedToken.getToken());
            providerResult.setTenantId(tokenRequestContext.getTenantId());
            providerResult.setExpiresInSeconds(exchangedToken.getExpiresAt().toEpochSecond());
            return providerResult;
        };
        // Run on the caller-supplied executor when one is configured.
        if (options.getExecutorService() != null) {
            return CompletableFuture.supplyAsync(resultSupplier, options.getExecutorService());
        }
        return CompletableFuture.supplyAsync(resultSupplier);
    };
}
/**
 * Acquires a token via the workload-identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws CredentialUnavailableException when acquisition fails
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication workloadClient =
        workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        ClientCredentialParameters parameters = ClientCredentialParameters
            .builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
            .build();
        return new MsalToken(workloadClient.acquireToken(parameters).get());
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Returns the options this identity client was configured with.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
// NOTE(review): returns null — presumably this reactive base-class hook is unused by
// the synchronous client; confirm callers tolerate a null Mono before relying on it.
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
return null;
}
} |
same as above | public void getNonEncodedFileName(String fileName) {
ShareFileClient fileClient = shareClient.getFileClient(fileName);
assertEquals(fileName, fileClient.getFilePath());
fileClient.create(1024);
assertTrue(fileClient.exists());
} | assertEquals(fileName, fileClient.getFilePath()); | public void getNonEncodedFileName(String fileName) {
ShareFileClient fileClient = shareClient.getFileClient(fileName);
assertEquals(fileName, fileClient.getFilePath());
fileClient.create(1024);
assertTrue(fileClient.exists());
} | class FileApiTests extends FileShareTestBase {
// Per-test clients/names, re-created in setup() for isolation.
private ShareFileClient primaryFileClient;
private ShareClient shareClient;
private String shareName;
private String filePath;
// Shared fixtures initialized in setup(); static so helper suppliers can reference them.
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
// SDDL string used when tests create an explicit file permission.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
    // Fresh share and file path per test so tests cannot interfere with each other.
    shareName = generateShareName();
    filePath = generatePathName();
    shareClient = shareBuilderHelper(shareName).buildClient();
    shareClient.create();
    primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    testMetadata = Collections.singletonMap("testmetadata", "value");
    httpHeaders = new ShareFileHttpHeaders()
        .setContentLanguage("en")
        .setContentType("application/octet-stream");
    // Explicit type witness is unnecessary here; EnumSet.of infers it.
    smbProperties = new FileSmbProperties()
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.NORMAL));
}
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String fileURL = primaryFileClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
@Test
public void getShareSnapshotURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot();
expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot();
ShareFileClient newFileClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot())
.buildClient().getFileClient(filePath);
String fileURL = newFileClient.getFileUrl();
assertEquals(expectURL, fileURL);
String snapshotEndpoint = String.format("https:
shareName, filePath, shareSnapshotInfo.getSnapshot());
ShareFileClient client = getFileClient(StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint);
assertEquals(client.getFileUrl(), snapshotEndpoint);
}
@Test
public void exists() {
    // A file reports existence once it has been created on the service.
    primaryFileClient.create(Constants.KB);
    assertTrue(primaryFileClient.exists());
}
@Test
public void doesNotExist() {
    // The file was never created, so the service reports it as absent.
    assertFalse(primaryFileClient.exists());
}
@Test
public void existsError() {
    // An invalid SAS token should make the existence check fail with 403 Forbidden.
    primaryFileClient = fileBuilderHelper(shareName, filePath)
        .sasToken("sig=dummyToken").buildFileClient();
    ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.exists());
    // Fix: JUnit's assertEquals takes (expected, actual) — arguments were swapped.
    assertEquals(403, e.getResponse().getStatusCode());
}
@Test
public void createFile() {
    // A bare create with only a size should succeed with 201 Created.
    Response<ShareFileInfo> response =
        primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void createFile4TB() {
    // The maximum file size (4 TiB) must be accepted by service versions >= 2020-02-10.
    Response<ShareFileInfo> response =
        primaryFileClient.createWithResponse(4 * Constants.TB, null, null, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createFileError() {
    // A negative size is rejected by the service with 400 / OutOfRangeInput.
    ShareStorageException ex =
        assertThrows(ShareStorageException.class, () -> primaryFileClient.create(-1));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
}
// Creates a file referencing a server-side permission by key and verifies every
// SMB property is populated in the response.
@Test
public void createFileWithArgsFpk() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
Response<ShareFileInfo> resp = primaryFileClient
.createWithResponse(1024, httpHeaders, smbProperties, null, testMetadata, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
// The service must echo back all SMB metadata for the new file.
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Same as createFileWithArgsFpk, but passes the SDDL permission string inline
// instead of a pre-created permission key.
@Test
public void createFileWithArgsFp() {
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileInfo> resp = primaryFileClient.createWithResponse(1024, httpHeaders, smbProperties,
FILE_PERMISSION, testMetadata, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
// The service must echo back all SMB metadata for the new file.
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Verifies a caller-supplied file change time round-trips through create/getProperties
// (compared with precision tolerance because the service truncates timestamps).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void createChangeTime() {
OffsetDateTime changeTime = testResourceNamer.now();
// Uses the overload with a trailing Context parameter, hence the extra null.
primaryFileClient.createWithResponse(512, null, new FileSmbProperties().setFileChangeTime(changeTime),
null, null, null, null, null);
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties()
.getFileChangeTime(), changeTime);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void createFileOAuth() {
    // Creates a file through an OAuth (token-credential) service client.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    Response<ShareFileInfo> result = fileClient.createWithResponse(Constants.KB, null, null, null, null, null,
        null);
    // Fix: JUnit's assertEquals takes (expected, actual) — arguments were swapped.
    assertEquals(shareName, fileClient.getShareName());
    // Fix: renamed local that shadowed the instance field "filePath".
    String[] filePathSegments = fileClient.getFilePath().split("/");
    assertEquals(fileName, filePathSegments[1]);
    // The deserialized ETag must match the raw response header.
    assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG));
}
    // A negative max size must be rejected by the service with 400 OutOfRangeInput.
    @Test
    public void createFileWithArgsError() {
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryFileClient.createWithResponse(-1, null, null, null,
                testMetadata, null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
    }
    // Invalid permission/permission-key combinations (both supplied, or an oversized permission)
    // must be rejected client-side with IllegalArgumentException before any request is sent.
    @ParameterizedTest
    @MethodSource("permissionAndKeySupplier")
    public void createFilePermissionAndKeyError(String filePermissionKey, String permission) {
        FileSmbProperties smbProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
        assertThrows(IllegalArgumentException.class, () ->
            primaryFileClient.createWithResponse(1024, null, smbProperties, permission, null, null, null));
    }
private static Stream<Arguments> permissionAndKeySupplier() {
return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION),
Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB))));
}
    // Verifies trailing-dot handling on file names (service version 2022-11-02+): when
    // allowTrailingDot is set the dot is preserved in the listed name, otherwise it is trimmed.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void createFileTrailingDot(boolean allowTrailingDot) {
        shareClient = getShareClient(shareName, allowTrailingDot, null);
        ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
        String fileName = generatePathName();
        String fileNameWithDot = fileName + ".";
        ShareFileClient fileClient = rootDirectory.getFileClient(fileNameWithDot);
        fileClient.create(1024);
        List<String> foundFiles = new ArrayList<>();
        for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) {
            foundFiles.add(fileRef.getName());
        }
        if (allowTrailingDot) {
            assertEquals(fileNameWithDot, foundFiles.get(0));
        } else {
            assertEquals(fileName, foundFiles.get(0));
        }
    }
    // Round-trips the default test payload through uploadRange/download and verifies the
    // download headers (SMB properties, ETag, last-modified) and the exact byte content.
    @Test
    public void uploadAndDownloadData() {
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null,
            null);
        ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        // Full download may return 200 or 206 depending on whether a range is applied.
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // Uploads the payload at offset 1 and downloads the same explicit range, expecting a
    // 206 partial-content response whose bytes match the original data.
    @Test
    public void uploadAndDownloadDataWithArgs() {
        primaryFileClient.create(1024);
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L),
            null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient
            .downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null);
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
        assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // Same round-trip as uploadAndDownloadData, but through an OAuth-authenticated client
    // with backup token intent (service version 2021-04-10+).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void uploadAndDownloadDataOAuth() {
        ShareServiceClient oAuthServiceClient =
            getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String fileName = generatePathName();
        ShareFileClient fileClient = dirClient.getFileClient(fileName);
        fileClient.create(DATA.getDefaultDataSizeLong());
        Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null);
        ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        // Full download may return 200 or 206 depending on whether a range is applied.
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // Round-trip using the parallel-upload path; identical assertions to uploadAndDownloadData.
    // NOTE(review): this currently exercises uploadRangeWithResponse, not the parallel upload
    // API — presumably intentional mirroring of the async suite; confirm against the suite plan.
    @Test
    public void parallelUploadAndDownloadData() {
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()),
            null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null,
            null);
        ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // Uploads at offset 1 and downloads the matching range; expects 201 upload and 206 download
    // with the original payload bytes.
    @Test
    public void parallelUploadAndDownloadDataWithArgs() {
        primaryFileClient.create(1024);
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L),
            null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
            new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null);
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // Parallel upload from an InputStream without an explicit length: the client must consume
    // the stream fully and the downloaded bytes must match.
    @Test
    public void parallelUploadInputStreamNoLength() {
        primaryFileClient.create(DATA.getDefaultDataSize());
        primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        primaryFileClient.download(os);
        assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
    }
    // A declared length that disagrees with the stream (zero, negative, short, or long) must
    // cause uploadRangeWithResponse to fail for every mismatch case.
    @Test
    public void parallelUploadInputStreamBadLength() {
        int[] lengths = new int[]{0, -100, DATA.getDefaultDataSize() - 1, DATA.getDefaultDataSize() + 1};
        for (int length : lengths) {
            primaryFileClient.create(DATA.getDefaultDataSize());
            assertThrows(Exception.class, () ->
                primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
                    length), null, null));
        }
    }
    // Injects a transient failure into the pipeline and verifies the upload retries to success,
    // leaving the file content intact.
    @Test
    public void uploadSuccessfulRetry() {
        primaryFileClient.create(DATA.getDefaultDataSize());
        ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
            primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy());
        clientWithFailure.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        primaryFileClient.download(os);
        assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
    }
    // uploadRange/download round-trip verifying download headers and exact content.
    @Test
    public void uploadRangeAndDownloadData() {
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null,
            null);
        ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        // Full download may return 200 or 206 depending on whether a range is applied.
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // uploadRange at offset 1 followed by a ranged download; expects 201 then 206 with matching bytes.
    @Test
    public void uploadRangeAndDownloadDataWithArgs() {
        primaryFileClient.create(1024);
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L),
            null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
            new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null);
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
        assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
@Test
public void downloadAllNull() {
primaryFileClient.create(DATA.getDefaultDataSizeLong());
primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse response = primaryFileClient.downloadWithResponse(stream, null, null, null);
byte[] body = stream.toByteArray();
ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
assertArrayEquals(DATA.getDefaultBytes(), body);
CoreUtils.isNullOrEmpty(headers.getMetadata());
assertNotNull(headers.getContentLength());
assertNotNull(headers.getContentType());
assertNull(headers.getContentMd5());
assertNull(headers.getContentEncoding());
assertNull(headers.getCacheControl());
assertNull(headers.getContentDisposition());
assertNull(headers.getContentLanguage());
}
    // Downloads a zero- or one-byte file; the result length must match and any byte is zeroed.
    @ParameterizedTest
    @ValueSource(ints = {0, 1})
    public void downloadEmptyFile(int fileSize) {
        primaryFileClient.create(fileSize);
        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        primaryFileClient.download(outStream);
        byte[] result = outStream.toByteArray();
        assertEquals(result.length, fileSize);
        if (fileSize > 0) {
            // Newly created file ranges are zero-filled by the service.
            assertEquals(0, result[0]);
        }
    }
/*
This is to test the appropriate integration of DownloadResponse, including setting the correct range values on
HttpGetterInfo.
*/
    // Regression test: the retry of a ranged download must re-request the same byte range.
    @Test
    public void downloadWithRetryRange() {
        /*
        We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing
        a retry per the DownloadRetryOptions. The next request should have the same range header, which was generated
        from the count and offset values in HttpGetterInfo that was constructed on the initial call to download. We
        don't need to check the data here, but we want to ensure that the correct range is set each time. This will
        test the correction of a bug that was found which caused HttpGetterInfo to have an incorrect offset when it was
        constructed in FileClient.download().
        */
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
        // MockRetryRangeResponsePolicy fails any request whose Range header differs from "bytes=2-6".
        ShareFileClient fc2 = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
            primaryFileClient.getFileUrl(), new MockRetryRangeResponsePolicy("bytes=2-6"));
        ShareFileRange range = new ShareFileRange(2, 6L);
        DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3);
        RuntimeException e = assertThrows(RuntimeException.class,
            () -> fc2.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileDownloadOptions()
                .setRange(range).setRetryOptions(options), null, null));
        /*
        Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is
        NOT thrown because the types would not match.
        */
        assertInstanceOf(IOException.class, e.getCause());
    }
    // A download interrupted by injected failures (5 of them) must complete successfully under
    // the default retry behavior and return the full payload.
    @Test
    public void downloadRetryDefault() {
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
        ShareFileClient failureClient = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
            primaryFileClient.getFileUrl(), new MockFailureResponsePolicy(5));
        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        failureClient.download(outStream);
        String bodyStr = outStream.toString();
        assertEquals(bodyStr, DATA.getDefaultText());
    }
    // Upload/download against a file whose name ends with a trailing dot, with trailing dots
    // allowed (service version 2022-11-02+).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void downloadTrailingDot() {
        ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        shareFileClient.create(DATA.getDefaultDataSizeLong());
        shareFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize());
        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        shareFileClient.download(outStream);
        String downloadedData = outStream.toString();
        assertEquals(downloadedData, DATA.getDefaultText());
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void downloadOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
String fileName = generatePathName();
ShareFileClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(DATA.getDefaultDataSizeLong());
fileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
ShareFileProperties properties = fileClient.getProperties();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse response = fileClient.downloadWithResponse(stream, null, null, null);
byte[] body = stream.toByteArray();
ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
assertArrayEquals(body, DATA.getDefaultBytes());
CoreUtils.isNullOrEmpty(headers.getMetadata());
assertEquals(headers.getContentLength(), properties.getContentLength());
assertEquals(headers.getContentType(), properties.getContentType());
assertEquals(headers.getContentMd5(), properties.getContentMd5());
assertEquals(headers.getContentEncoding(), properties.getContentEncoding());
assertEquals(headers.getCacheControl(), properties.getCacheControl());
assertEquals(headers.getContentDisposition(), properties.getContentDisposition());
}
    // Creates a 4 TB file and writes/reads the final range, exercising offsets beyond 32 bits
    // (service version 2020-02-10+). Only the tail range is transferred, so the test stays fast.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
    @Test
    public void uploadRange4TB() {
        long fileSize = 4 * Constants.TB;
        primaryFileClient.create(fileSize);
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())
                .setOffset(fileSize - DATA.getDefaultDataSizeLong()), null, null); /* Upload to end of file. */
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
            new ShareFileRange(fileSize - DATA.getDefaultDataSizeLong(), fileSize), true, null, null);
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
        assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
    }
    // Buffered upload of payloads at and above the 4 MiB max-put-range boundary must succeed
    // (the client is expected to split the data into multiple range writes).
    @ParameterizedTest
    @ValueSource(longs = {
        4 * Constants.MB,
        5 * Constants.MB})
    public void uploadBufferedRangeGreaterThanMaxPutRange(long length) {
        primaryFileClient.create(length);
        ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
        assertDoesNotThrow(() -> primaryFileClient.upload(data, length, null));
    }
    // uploadRange/download against a trailing-dot file name with trailing dots allowed
    // (service version 2022-11-02+).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void uploadRangeTrailingDot() {
        primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
            DATA.getDefaultDataSizeLong());
        Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(options, null, null);
        ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(),
            null, null, null);
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        FileShareTestHelper.assertResponseStatusCode(downloadResponse, 200);
        assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
    }
    // uploadRange/download round-trip through an OAuth-authenticated client with backup token
    // intent (service version 2021-04-10+), verifying headers and content.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void uploadRangeOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String fileName = generatePathName();
        ShareFileClient fileClient = dirClient.getFileClient(fileName);
        fileClient.create(DATA.getDefaultDataSizeLong());
        Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null);
        ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        // Full download may return 200 or 206 depending on whether a range is applied.
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
    }
    // Buffered upload with varying total sizes and chunk sizes (null chunk size = client default);
    // each combination must complete and return a non-null upload result.
    @ParameterizedTest
    @MethodSource("bufferedUploadVariousPartitions")
    public void bufferedUploadVariousPartitions(Long length, Long uploadChunkLength) {
        primaryFileClient.create(length);
        ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper
            .getRandomBuffer(Math.toIntExact(length)));
        assertNotNull(primaryFileClient.upload(data, length, new ParallelTransferOptions()
            .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
    }
private static Stream<Arguments> bufferedUploadVariousPartitions() {
return Stream.of(
Arguments.of(1024L, null),
Arguments.of(1024L, 1024L),
Arguments.of(1024L, 256L),
Arguments.of(4L * Constants.MB, null),
Arguments.of(4L * Constants.MB, 1024L),
Arguments.of(20L * Constants.MB, null),
Arguments.of(20L * Constants.MB, 4L * Constants.MB)
);
}
    // A single-shot partition of 20 MiB exceeds the service's maximum put-range size, so the
    // buffered upload must fail.
    @Test
    public void bufferedUploadErrorPartitionTooBig() {
        long length = 20 * Constants.MB;
        long uploadChunkLength = 20 * Constants.MB;
        primaryFileClient.create(length);
        ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
        assertThrows(Exception.class, () -> primaryFileClient.upload(data, length, new ParallelTransferOptions()
            .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
    }
    // Uploading to a file that was never created must fail with 404 ResourceNotFound.
    @Test
    public void uploadDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
                DATA.getDefaultDataSizeLong()).setOffset(1L), null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    // Parallel upload to a nonexistent file must fail with 404 ResourceNotFound.
    @Test
    public void parallelUploadDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    // uploadRange to a nonexistent file must fail with 404 ResourceNotFound.
    @Test
    public void uploadRangeDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    // uploadRange through a pipeline that injects a transient failure must retry and leave the
    // file content correct when read back with a matching range.
    @Test
    public void uploadDataRetryOnTransientFailure() {
        ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
            primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy());
        primaryFileClient.create(1024);
        clientWithFailure.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        primaryFileClient.downloadWithResponse(os, new ShareFileRange(0, DATA.getDefaultDataSizeLong() - 1), null, null,
            null);
        assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
    }
    // Clears the first 7 bytes of uploaded content and verifies the cleared range reads back
    // as all zeros.
    @Test
    public void uploadAndClearRange() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        primaryFileClient.clearRange(7);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        primaryFileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
        for (byte b : stream.toByteArray()) {
            assertEquals(0, b);
        }
    }
    // Clears 7 bytes starting at offset 1 and verifies that range reads back as all zeros.
    @Test
    public void uploadAndClearRangeWithArgs() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        primaryFileClient.clearRangeWithResponse(7, 1, null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, 7L), false, null, null);
        for (byte b : stream.toByteArray()) {
            assertEquals(0, b);
        }
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void clearRangeTrailingDot() {
ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
primaryFileClient.create(DATA.getDefaultDataSizeLong());
FileShareTestHelper.assertResponseStatusCode(primaryFileClient.clearRangeWithResponse(
DATA.getDefaultDataSizeLong(), 0, null, null), 201);
}
    // Same clear-range scenario as uploadAndClearRange, but through an OAuth-authenticated
    // client with backup token intent (service version 2021-04-10+).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void uploadAndClearRangeOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String fileName = generatePathName();
        ShareFileClient fileClient = dirClient.getFileClient(fileName);
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        fileClient.create(fullInfoString.length());
        fileClient.uploadRange(fullInfoData, fullInfoString.length());
        fileClient.clearRange(7);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        fileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
        for (byte b : stream.toByteArray()) {
            assertEquals(0, b);
        }
    }
    // Clearing a range past the end of the file must fail with 416 InvalidRange.
    @Test
    public void clearRangeError() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.clearRange(30));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
    }
    // Clearing a range whose offset pushes it past the end of the file must fail with
    // 416 InvalidRange.
    @Test
    public void clearRangeErrorArgs() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.clearRangeWithResponse(7, 20, null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
    }
    // A declared length that disagrees with the actual stream length must raise
    // UnexpectedLengthException with a message indicating the direction of the mismatch.
    @ParameterizedTest
    @MethodSource("uploadDataLengthMismatchSupplier")
    public void uploadDataLengthMismatch(int size, String errMsg) {
        primaryFileClient.create(1024);
        UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
            () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
                DATA.getDefaultInputStream(), size), null, Context.NONE));
        assertTrue(e.getMessage().contains(errMsg));
    }
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
return Stream.of(
Arguments.of(6, "more than"),
Arguments.of(8, "less than"));
}
    // Same length-mismatch check as uploadDataLengthMismatch, via the parallel upload API.
    @ParameterizedTest
    @MethodSource("uploadDataLengthMismatchSupplier")
    public void parallelUploadDataLengthMismatch(int size, String errMsg) {
        primaryFileClient.create(1024);
        UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
            () -> primaryFileClient.upload(DATA.getDefaultInputStream(), size, null));
        assertTrue(e.getMessage().contains(errMsg));
    }
    // Same length-mismatch check via uploadRange.
    @ParameterizedTest
    @MethodSource("uploadDataLengthMismatchSupplier")
    public void uploadRangeLengthMismatch(int size, String errMsg) {
        primaryFileClient.create(1024);
        UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
            () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), size));
        assertTrue(e.getMessage().contains(errMsg));
    }
    // Downloading a file that was never created must fail with 404 ResourceNotFound.
    @Test
    public void downloadDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileRange(0, 1023L),
                false, null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
@Test
public void uploadFileDoesNotExist() {
File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
if (uploadFile.exists()) {
assert uploadFile.delete();
}
UncheckedIOException e = assertThrows(UncheckedIOException.class,
() -> primaryFileClient.uploadFromFile(uploadFile.getPath()));
assertInstanceOf(NoSuchFileException.class, e.getCause());
uploadFile.delete();
}
/*
* Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
*/
    // Live-only: round-trips files of various sizes (0 bytes to 50 MB) through uploadFromFile /
    // downloadToFile using a default client built without an explicit HttpClient.
    @LiveOnly
    @ParameterizedTest
    @ValueSource(ints = {
        0,
        20,
        16 * 1024 * 1024,
        8 * 1026 * 1024 + 10,
        50 * Constants.MB
    })
    public void downloadFileBufferCopy(int fileSize) throws IOException {
        ShareServiceClient shareServiceClient = new ShareServiceClientBuilder()
            .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
            .buildClient();
        ShareFileClient fileClient = shareServiceClient.getShareClient(shareName)
            .createFile(filePath, fileSize);
        File file = FileShareTestHelper.getRandomFile(fileSize);
        fileClient.uploadFromFile(file.toPath().toString());
        File outFile = new File(generatePathName() + ".txt");
        if (outFile.exists()) {
            assertTrue(outFile.delete());
        }
        fileClient.downloadToFile(outFile.toPath().toString());
        assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
        // Clean up the share and both local temp files.
        shareServiceClient.deleteShare(shareName);
        outFile.delete();
        file.delete();
    }
    // downloadToFile must refuse to overwrite an existing local file, surfacing
    // FileAlreadyExistsException wrapped in UncheckedIOException.
    @Test
    public void uploadAndDownloadFileExists() throws IOException {
        String data = "Download file exists";
        File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
        if (!downloadFile.exists()) {
            assertTrue(downloadFile.createNewFile());
        }
        primaryFileClient.create(data.length());
        primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
            data.length());
        UncheckedIOException e = assertThrows(UncheckedIOException.class,
            () -> primaryFileClient.downloadToFile(downloadFile.getPath()));
        assertInstanceOf(FileAlreadyExistsException.class, e.getCause());
        downloadFile.delete();
    }
@Test
public void uploadAndDownloadToFileDoesNotExist() throws IOException {
String data = "Download file DoesNotExist";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
if (downloadFile.exists()) {
assertTrue(downloadFile.createNewFile());
}
primaryFileClient.create(data.length());
primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
data.length());
primaryFileClient.downloadToFile(downloadFile.getPath());
Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z");
assertEquals(data, scanner.next());
scanner.close();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), downloadFile.getName());
}
    // With FileLastWrittenMode.PRESERVE an upload must keep the file's last-write time; with
    // NOW it must change (service version 2021-06-08+).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void uploadRangePreserveFileLastWrittenOn() {
        FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
        for (FileLastWrittenMode mode : modes) {
            primaryFileClient.create(Constants.KB);
            ShareFileProperties initialProps = primaryFileClient.getProperties();
            primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
                new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
                Constants.KB).setLastWrittenMode(mode), null, null);
            ShareFileProperties resultProps = primaryFileClient.getProperties();
            if (mode.equals(FileLastWrittenMode.PRESERVE)) {
                assertEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                    .getFileLastWriteTime());
            } else {
                assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                    .getFileLastWriteTime());
            }
        }
    }
@Disabled("the groovy test was not testing this test properly. need to investigate this test further.")
@ParameterizedTest
@ValueSource(strings = {
    "",
    "ü1ü" /* Something that needs to be url encoded. */
})
public void uploadRangeFromURL(String pathSuffix) {
    // Copies a sub-range of one file into another via a SAS-authenticated source URL,
    // then verifies the copied window byte-for-byte.
    primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
    primaryFileClient.create(1024);
    String data = "The quick brown fox jumps over the lazy dog";
    int sourceOffset = 5;
    int length = 5;
    int destinationOffset = 0;
    // Explicit UTF-8 instead of the platform-default charset (data is ASCII, so
    // behavior is unchanged, but the default-charset call is an anti-pattern).
    primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
        data.length());
    // Read-only SAS so the destination can pull bytes from the source's URL.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileClient.getShareName())
        .setFilePath(primaryFileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    // getFileUrl() already returns a String; the original's redundant toString() is dropped.
    ShareFileClient client = fileBuilderHelper(shareName, "destination" + pathSuffix)
        .endpoint(primaryFileClient.getFileUrl())
        .buildFileClient();
    client.create(1024);
    client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileClient.getFileUrl() + "?"
        + sasToken);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    client.download(stream);
    String result = new String(stream.toByteArray(), StandardCharsets.UTF_8);
    // The destination window [destinationOffset, +length) must equal the source
    // window [sourceOffset, +length).
    for (int i = 0; i < length; i++) {
        assertEquals(result.charAt(destinationOffset + i), data.charAt(sourceOffset + i));
    }
}
// 2021-04-10+ is required for token-intent (OAuth) access to file shares.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadRangeFromURLOAuth() {
    // Shared-key-backed OAuth client with BACKUP intent.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClientSharedKey(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(1024);
    String data = "The quick brown fox jumps over the lazy dog";
    int sourceOffset = 5;
    int length = 5;
    int destinationOffset = 0;
    fileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length());
    // Read-only SAS on the source file so the destination can pull from its URL.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(fileClient.getShareName())
        .setFilePath(fileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    String fileNameDest = generatePathName();
    ShareFileClient fileClientDest = dirClient.getFileClient(fileNameDest);
    fileClientDest.create(1024);
    // Server-side copy of [sourceOffset, sourceOffset+length) into the destination at offset 0.
    Response<ShareFileUploadRangeFromUrlInfo> uploadResponse = fileClientDest.uploadRangeFromUrlWithResponse(length,
        destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken, null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse downloadResponse = fileClientDest.downloadWithResponse(stream, null, null, null, null);
    ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
    // 201 Created confirms the ranged copy succeeded.
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    // Full downloads answer 200; ranged downloads answer 206.
    assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
    assertEquals(headers.getContentLength(), 1024);
    // A newly created file should have all of these headers populated.
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    // 117 == 'u': data.charAt(5) copied from source offset 5 to destination offset 0.
    assertEquals(stream.toByteArray()[0], 117);
}
// 2021-06-08+ is required for FileLastWrittenMode on uploadRangeFromUrl.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void uploadRangeFromUrlPreserveFileLastWrittenOn() {
    FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
    primaryFileClient.create(Constants.KB);
    ShareFileClient destinationClient = shareClient.getFileClient(generatePathName());
    destinationClient.create(Constants.KB);
    // Baseline destination properties taken once, before either copy in the loop.
    ShareFileProperties initialProps = destinationClient.getProperties();
    primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    // Read-only SAS so the destination can pull the source file's bytes by URL.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential
        .fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileClient.getShareName())
        .setFilePath(primaryFileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    for (FileLastWrittenMode mode : modes) {
        destinationClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(Constants.KB,
            primaryFileClient.getFileUrl() + "?" + sasToken).setLastWrittenMode(mode), null, null);
        ShareFileProperties resultProps = destinationClient.getProperties();
        if (mode.equals(FileLastWrittenMode.PRESERVE)) {
            // PRESERVE: last-write time must match the baseline (within service precision).
            assertTrue(FileShareTestHelper.compareDatesWithPrecision(
                initialProps.getSmbProperties().getFileLastWriteTime(),
                resultProps.getSmbProperties().getFileLastWriteTime()));
        } else {
            // NOW: the service stamps a fresh last-write time.
            assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        }
    }
}
// 2022-11-02+ adds support for file names ending in '.' (trailing dot).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeFromUrlTrailingDot() {
    // Share configured with trailing-dot support enabled for source and destination.
    shareClient = getShareClient(shareName, true, true);
    ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
    ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
    sourceClient.create(Constants.KB);
    ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
    destinationClient.create(Constants.KB);
    sourceClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    // Share-level SAS with enough rights for the URL-based range copy.
    ShareFileSasPermission permissions = new ShareFileSasPermission()
        .setReadPermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setDeletePermission(true);
    OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
    ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
    String sasToken = shareClient.generateSas(sasValues);
    Response<ShareFileUploadRangeFromUrlInfo> res = destinationClient.uploadRangeFromUrlWithResponse(
        new ShareFileUploadRangeFromUrlOptions(Constants.KB, sourceClient.getFileUrl() + "?" + sasToken), null,
        null);
    // 201 Created confirms the copy between dot-suffixed files succeeded.
    FileShareTestHelper.assertResponseStatusCode(res, 201);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeFromUrlTrailingDotFail() {
    // Trailing-dot support is disabled on the destination side of this share, so a
    // URL-based range copy between dot-suffixed files must be rejected.
    shareClient = getShareClient(shareName, true, false);
    ShareDirectoryClient rootDir = shareClient.getRootDirectoryClient();
    long size = DATA.getDefaultDataSizeLong();
    ShareFileClient src = rootDir.getFileClient(generatePathName() + ".");
    src.create(size);
    ShareFileClient dst = rootDir.getFileClient(generatePathName() + ".");
    dst.create(size);
    src.uploadRange(DATA.getDefaultInputStream(), size);
    ShareFileUploadRangeFromUrlOptions copyOptions
        = new ShareFileUploadRangeFromUrlOptions(size, src.getFileUrl());
    assertThrows(ShareStorageException.class,
        () -> dst.uploadRangeFromUrlWithResponse(copyOptions, null, null));
}
@Test
public void openInputStreamWithRange() throws IOException {
    // Uploads "long test string" and reads back only bytes 5..10 (inclusive) via a
    // ranged input stream; the range spans 6 bytes.
    primaryFileClient.create(1024);
    ShareFileRange shareFileRange = new ShareFileRange(5L, 10L);
    byte[] dataBytes = "long test string".getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream inputStreamData = new ByteArrayInputStream(dataBytes);
    primaryFileClient.upload(inputStreamData, dataBytes.length, null);
    int totalBytesRead = 0;
    // try-with-resources: the original only closed the stream on the success path
    // and leaked it if read() threw.
    try (StorageFileInputStream stream = primaryFileClient.openInputStream(shareFileRange)) {
        while (stream.read() != -1) {
            totalBytesRead++;
        }
    }
    // Range 5..10 inclusive is 6 bytes.
    assertEquals(6, totalBytesRead);
}
@ParameterizedTest
@ValueSource(strings = {
    "",
    "ü1ü" /* Something that needs to be url encoded. */
})
public void startCopy(String pathSuffix) {
    // Copy the file onto itself and verify the service hands back a copy id.
    primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
    primaryFileClient.create(1024);
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions();
    SyncPoller<ShareFileCopyInfo, Void> copyPoller
        = primaryFileClient.beginCopy(primaryFileClient.getFileUrl(), copyOptions, null);
    ShareFileCopyInfo copyInfo = copyPoller.poll().getValue();
    assertNotNull(copyInfo.getCopyId());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, smbProperties,
setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly,
setArchiveAttribute, null, null, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void startCopyTrailingDot() {
    // Trailing-dot support enabled for both source and destination names.
    shareClient = getShareClient(shareName, true, true);
    ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + ".");
    sourceClient.create(1024);
    ShareFileClient destClient = shareClient.getFileClient(generatePathName() + ".");
    destClient.create(1024);
    byte[] data = FileShareTestHelper.getRandomBuffer(Constants.KB);
    ByteArrayInputStream inputStream = new ByteArrayInputStream(data);
    sourceClient.uploadRange(inputStream, Constants.KB);
    SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceClient.getFileUrl(),
        new ShareFileCopyOptions(), null);
    // Block until the copy finishes, then confirm it completed successfully.
    poller.waitForCompletion();
    assertEquals(poller.poll().getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void startCopyTrailingDotFail() {
    // Trailing-dot support disabled on this share: the copy is expected to fail
    // with 404 RESOURCE_NOT_FOUND (presumably the dot-suffixed source name does
    // not resolve — see the assertion below).
    shareClient = getShareClient(shareName, true, false);
    ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + ".");
    sourceClient.create(1024);
    ShareFileClient destClient = shareClient.getFileClient(generatePathName() + ".");
    destClient.create(1024);
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize());
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// Disabled upstream: the poller can miss the first emitted event when there is a
// gap between subscribing and starting to observe events.
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap between"
    + " the time subscribed and the time we start observing events.")
@Test
public void startCopyError() {
    primaryFileClient.create(1024);
    // "some url" is not a valid copy source, so waiting for completion should
    // surface a 400 INVALID_HEADER_VALUE from the service.
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy("some url", testMetadata, null);
    ShareStorageException e = assertThrows(ShareStorageException.class, poller::waitForCompletion);
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.INVALID_HEADER_VALUE);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(setFilePermission ? FILE_PERMISSION : null)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setPermissionCopyModeType(permissionType);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    // Self-copy with ignoreReadOnly and the archive attribute enabled; the copy
    // should complete and report a copy id.
    primaryFileClient.create(1024);
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setIgnoreReadOnly(true)
        .setArchiveAttribute(true);
    PollResponse<ShareFileCopyInfo> response
        = primaryFileClient.beginCopy(primaryFileClient.getFileUrl(), copyOptions, null).poll();
    assertNotNull(response.getValue().getCopyId());
    assertEquals(response.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsFilePermission() {
    // Self-copy that OVERRIDEs the destination's SMB properties with explicit values.
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setNtfsFileAttributes(ntfs);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    // BUG FIX: compareDatesWithPrecision returns a boolean (see its use inside
    // assertTrue elsewhere in this class); the original discarded the result, so
    // these timestamp checks never asserted anything.
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
        smbProperties.getFileCreationTime()));
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
        smbProperties.getFileLastWriteTime()));
    assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsChangeTime() {
    // Self-copy that OVERRIDEs the destination's file-change time.
    ShareFileInfo client = primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    smbProperties.setFileChangeTime(testResourceNamer.now());
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    // BUG FIX: compareDatesWithPrecision returns a boolean (see its use inside
    // assertTrue elsewhere in this class); the original discarded the result, so
    // this check never asserted anything.
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
        primaryFileClient.getProperties().getSmbProperties().getFileChangeTime()));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
    // Self-copy that OVERRIDEs SMB properties, referencing the permission by key
    // rather than by value.
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setNtfsFileAttributes(ntfs)
        .setFilePermissionKey(filePermissionKey);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    // BUG FIX: compareDatesWithPrecision returns a boolean (see its use inside
    // assertTrue elsewhere in this class); the original discarded the result, so
    // these timestamp checks never asserted anything.
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
        smbProperties.getFileCreationTime()));
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
        smbProperties.getFileLastWriteTime()));
    assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@Test
public void startCopyWithOptionLease() {
    // A copy to a leased destination must carry the active lease id.
    primaryFileClient.create(1024);
    String destUrl = primaryFileClient.getFileUrl();
    ShareRequestConditions leaseConditions
        = new ShareRequestConditions().setLeaseId(createLeaseClient(primaryFileClient).acquireLease());
    ShareFileCopyOptions copyOptions
        = new ShareFileCopyOptions().setDestinationRequestConditions(leaseConditions);
    PollResponse<ShareFileCopyInfo> response = primaryFileClient.beginCopy(destUrl, copyOptions, null).poll();
    assertNotNull(response.getValue().getCopyId());
    assertEquals(response.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
@Test
public void startCopyWithOptionsInvalidLease() {
    // Supplying a lease id that was never acquired must fail the copy.
    primaryFileClient.create(1024);
    String destUrl = primaryFileClient.getFileUrl();
    ShareRequestConditions bogusLease
        = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    ShareFileCopyOptions copyOptions
        = new ShareFileCopyOptions().setDestinationRequestConditions(bogusLease);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.beginCopy(destUrl, copyOptions, null));
}
@Test
public void startCopyWithOptionsMetadata() {
    // Metadata supplied in the copy options should be accepted and the copy complete.
    primaryFileClient.create(1024);
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions().setMetadata(testMetadata);
    PollResponse<ShareFileCopyInfo> response
        = primaryFileClient.beginCopy(primaryFileClient.getFileUrl(), copyOptions, null).poll();
    assertNotNull(response.getValue().getCopyId());
    assertEquals(response.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
    primaryFileClient.create(1024);
    // Capture the SMB timestamps/attributes the copy is expected to carry over.
    ShareFileProperties initialProperties = primaryFileClient.getProperties();
    OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
    OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
    OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
    EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
    String sourceURL = primaryFileClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    // Ask the service to copy every SMB property from the source file.
    CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
        .setCreatedOn(true)
        .setLastWrittenOn(true)
        .setChangedOn(true)
        .setFileAttributes(true);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setDestinationRequestConditions(conditions)
        .setSmbPropertiesToCopy(list);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    ShareFileProperties resultProperties = primaryFileClient.getProperties();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    // BUG FIX: compareDatesWithPrecision returns a boolean (see its use inside
    // assertTrue elsewhere in this class); the original discarded the result, so
    // these timestamp checks never asserted anything.
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getSmbProperties()
        .getFileCreationTime()));
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getSmbProperties()
        .getFileLastWriteTime()));
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getSmbProperties()
        .getFileChangeTime()));
    assertEquals(fileAttributes, resultProperties.getSmbProperties().getNtfsFileAttributes());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
boolean fileAttributes) {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(createdOn)
.setLastWrittenOn(lastWrittenOn)
.setChangedOn(changedOn)
.setFileAttributes(fileAttributes);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFileChangeTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
.setSmbPropertiesToCopy(list);
assertThrows(IllegalArgumentException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void startCopyOAuth() {
    // Copy between two files using a token-credential (backup intent) client.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient sourceClient = dirClient.getFileClient(generatePathName());
    sourceClient.create(DATA.getDefaultDataSizeLong());
    ShareFileClient destClient = dirClient.getFileClient(generatePathName());
    destClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    String sourceURL = sourceClient.getFileUrl();
    // BUG FIX: the original started the copy on sourceClient (copying the file
    // onto itself) and never used destClient at all. Start the copy on the
    // destination, matching the other copy tests in this class.
    SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(),
        null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
}
@Test
public void abortCopy() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    // Local client intentionally shadows the field: this test builds its own source file.
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    // The abort is expected to fail — presumably the copy has already completed
    // by the time the abort request arrives (TODO confirm against service docs).
    assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}
@Test
public void abortCopyLease() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    // Local client intentionally shadows the field: this test builds its own source file.
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    // Lease acquired on the source; the same conditions object is reused for the
    // destination request conditions and the abort call.
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(
        sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    // The abort is expected to fail — presumably the copy has already completed
    // by the time the abort request arrives (TODO confirm against service docs).
    assertThrows(ShareStorageException.class, () -> dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(),
        requestConditions, null, null));
}
@Test
public void abortCopyInvalidLease() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    // Local client intentionally shadows the field: this test builds its own source file.
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    // A random UUID that was never acquired as a lease — some call in the chained
    // sequence below must reject it.
    String leaseId = testResourceNamer.randomUuid();
    ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
    // NOTE(review): the whole begin-copy/poll/abort sequence sits inside one
    // assertThrows, so the test only proves that SOME step throws, not which one.
    assertThrows(ShareStorageException.class, () -> {
        SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(
            sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        assertNotNull(pollResponse);
        assertNotNull(pollResponse.getValue());
        dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(),
            requestConditions, null, null);
    });
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void abortCopyTrailingDot() {
    ByteArrayInputStream data = new ByteArrayInputStream(new byte[Constants.MB]);
    // Dot-suffixed file name with trailing-dot support enabled on the client.
    String fileName = generatePathName() + ".";
    ShareFileClient primaryFileClient = getFileClient(shareName, fileName, true, null);
    primaryFileClient.create(Constants.MB);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, fileName).buildFileClient();
    dest.create(Constants.MB);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    // The abort is expected to fail — presumably the copy has already completed
    // by the time the abort request arrives (TODO confirm against service docs).
    assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void abortCopyOAuth() {
    // Token-credential (backup intent) client variant of the abort-copy test.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient sourceClient = dirClient.getFileClient(fileName);
    sourceClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
    String sourceURL = sourceClient.getFileUrl();
    ShareFileClient destClient = dirClient.getFileClient(generatePathName());
    destClient.create(DATA.getDefaultDataSizeLong());
    SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    // The abort is expected to fail — presumably the copy has already completed
    // by the time the abort request arrives (TODO confirm against service docs).
    assertThrows(ShareStorageException.class, () -> destClient.abortCopy(pollResponse.getValue().getCopyId()));
}
@Test
public void abortCopyError() {
    // Aborting a copy id that never existed must surface a service error.
    ShareStorageException error = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.abortCopy("randomId"));
    assertNotNull(error);
}
@Test
public void deleteFile() {
    // Create a file, then delete it; the service answers 202 Accepted.
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    Response<Void> deleteResponse = primaryFileClient.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(deleteResponse, 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteFileTrailingDot() {
    // Deleting a dot-suffixed file succeeds when trailing-dot support is enabled.
    ShareFileClient fileWithDot = getFileClient(shareName, generatePathName() + ".", true, null);
    fileWithDot.create(1024);
    Response<Void> deleteResponse = fileWithDot.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(deleteResponse, 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteFileOAuth() {
    // A token-credential (backup intent) client can delete a file it created.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient parentDir
        = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(generatePathName());
    parentDir.create();
    ShareFileClient file = parentDir.getFileClient(generatePathName());
    file.create(Constants.KB);
    FileShareTestHelper.assertResponseStatusCode(file.deleteWithResponse(null, null), 202);
}
@Test
public void deleteFileError() {
    // Deleting a file that was never created yields 404 RESOURCE_NOT_FOUND.
    ShareStorageException error
        = assertThrows(ShareStorageException.class, () -> primaryFileClient.deleteWithResponse(null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsFile() {
    // deleteIfExists on an existing file behaves like delete: 202 Accepted.
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    Response<Boolean> deleteResponse = primaryFileClient.deleteIfExistsWithResponse(null, null, null);
    FileShareTestHelper.assertResponseStatusCode(deleteResponse, 202);
}
@Test
public void deleteIfExistsFileMin() {
    // Smoke test for the minimal deleteIfExists() overload: passes as long as the
    // call does not throw.
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    primaryFileClient.deleteIfExists();
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    // For a missing file, deleteIfExists reports false with a 404 status and the
    // file still does not exist afterwards.
    ShareFileClient missingFile = shareClient.getFileClient(generateShareName());
    Response<Boolean> response = missingFile.deleteIfExistsWithResponse(null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 404);
    assertFalse(response.getValue());
    assertFalse(missingFile.exists());
}
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
    // The first deleteIfExists removes the file; the second finds nothing to delete.
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    boolean firstAttempt = primaryFileClient.deleteIfExists();
    boolean secondAttempt = primaryFileClient.deleteIfExists();
    assertTrue(firstAttempt);
    assertFalse(secondAttempt);
}
@Test
public void getProperties() {
    primaryFileClient.create(1024);
    // NOTE(review): these smbProperties mutations are never passed to any request
    // in this test — they appear to be leftover setup; verify before removing.
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    Response<ShareFileProperties> resp = primaryFileClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    // The service populates every SMB property for a newly created file.
    assertNotNull(resp.getValue().getETag());
    assertNotNull(resp.getValue().getLastModified());
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
shareFileClient.create(1024);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileProperties> resp = shareFileClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
ShareFileInfo createInfo = fileClient.create(Constants.KB);
ShareFileProperties properties = fileClient.getProperties();
assertEquals(createInfo.getETag(), properties.getETag());
assertEquals(createInfo.getLastModified(), properties.getLastModified());
assertEquals(createInfo.getSmbProperties().getFilePermissionKey(),
properties.getSmbProperties().getFilePermissionKey());
assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(),
properties.getSmbProperties().getNtfsFileAttributes());
assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(),
properties.getSmbProperties().getFileLastWriteTime());
assertEquals(createInfo.getSmbProperties().getFileCreationTime(),
properties.getSmbProperties().getFileCreationTime());
assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
properties.getSmbProperties().getFileChangeTime());
assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId());
assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId());
}
@Test
public void getPropertiesError() {
ShareStorageException ex = assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties());
assertTrue(ex.getMessage().contains("ResourceNotFound"));
}
@Test
public void setHttpHeadersFpk() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
null, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void setHttpHeadersFp() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
FILE_PERMISSION, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
    // Setting only the file-change-time SMB property round-trips through setProperties/getProperties
    // (compared with reduced precision because the service truncates timestamps).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void setHttpHeadersChangeTime() {
        primaryFileClient.create(512);
        OffsetDateTime changeTime = testResourceNamer.now();
        primaryFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null);
        FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties()
            .getFileChangeTime(), changeTime);
    }
    // Same change-time round-trip as above, but against a trailing-dot file name with a client
    // configured to allow trailing dots.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void setHttpHeadersTrailingDot() {
        ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        shareFileClient.create(1024);
        OffsetDateTime changeTime = testResourceNamer.now();
        shareFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null);
        FileShareTestHelper.compareDatesWithPrecision(shareFileClient.getProperties().getSmbProperties()
            .getFileChangeTime(), changeTime);
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
fileClient.create(Constants.KB);
httpHeaders = new ShareFileHttpHeaders()
.setContentType("application/octet-stream")
.setContentDisposition("attachment")
.setCacheControl("no-transform")
.setContentEncoding("gzip")
.setContentLanguage("en");
Response<ShareFileInfo> res = fileClient.setPropertiesWithResponse(Constants.KB, httpHeaders, null, null, null,
null);
ShareFileProperties properties = fileClient.getProperties();
FileShareTestHelper.assertResponseStatusCode(res, 200);
assertNotNull(res.getValue().getETag());
assertEquals(res.getValue().getETag(), res.getHeaders().getValue(HttpHeaderName.ETAG));
assertEquals(properties.getContentType(), "application/octet-stream");
assertEquals(properties.getContentDisposition(), "attachment");
assertEquals(properties.getCacheControl(), "no-transform");
assertEquals(properties.getContentEncoding(), "gzip");
assertNull(properties.getContentMd5());
}
@Test
public void setHttpHeadersError() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
ShareStorageException e = assertThrows(ShareStorageException.class, () ->
primaryFileClient.setPropertiesWithResponse(-1, null, null, null, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
}
@Test
public void setMetadata() {
primaryFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareFileProperties getPropertiesBefore = primaryFileClient.getProperties();
Response<ShareFileMetadataInfo> setPropertiesResponse = primaryFileClient
.setMetadataWithResponse(updatedMetadata, null, null);
ShareFileProperties getPropertiesAfter = primaryFileClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
    // Metadata update round-trip against a trailing-dot file name (trailing dots allowed).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void setMetadataTrailingDot() {
        ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        shareFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null);
        Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
        ShareFileProperties getPropertiesBefore = shareFileClient.getProperties();
        Response<ShareFileMetadataInfo> setPropertiesResponse = shareFileClient.setMetadataWithResponse(updatedMetadata,
            null, null);
        ShareFileProperties getPropertiesAfter = shareFileClient.getProperties();
        assertEquals(testMetadata, getPropertiesBefore.getMetadata());
        FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
        assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
    }
    // Metadata update round-trip via an OAuth (token-intent BACKUP) client.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void setMetadataOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
        ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
        fileClient.createWithResponse(Constants.KB, null, null, null, testMetadata, null, null);
        ShareFileProperties getPropertiesBefore = fileClient.getProperties();
        Response<ShareFileMetadataInfo> setPropertiesResponse = fileClient.setMetadataWithResponse(updatedMetadata,
            null, null);
        ShareFileProperties getPropertiesAfter = fileClient.getProperties();
        assertEquals(testMetadata, getPropertiesBefore.getMetadata());
        FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
        assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
    }
@Test
public void setMetadataError() {
primaryFileClient.create(1024);
Map<String, String> errorMetadata = Collections.singletonMap("", "value");
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryFileClient.setMetadataWithResponse(errorMetadata, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@Test
public void listRanges() throws IOException {
String fileName = generatePathName();
primaryFileClient.create(1024);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
primaryFileClient.listRanges().forEach(it -> {
assertEquals(0, it.getStart());
assertEquals(1023, it.getEnd());
});
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
    // listRanges against a trailing-dot file name; local variable deliberately shadows the
    // primaryFileClient field so only the trailing-dot client is exercised.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void listRangesTrailingDot() throws IOException {
        ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        primaryFileClient.create(1024);
        String fileName = generatePathName() + ".";
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        primaryFileClient.listRanges().forEach(it -> {
            assertEquals(0, it.getStart());
            assertEquals(1023, it.getEnd());
        });
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
@Test
public void listRangesWithRange() throws IOException {
String fileName = generatePathName();
primaryFileClient.create(1024);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
assertEquals(0, it.getStart());
assertEquals(511, it.getEnd());
});
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
    // listRanges against a share snapshot: the field primaryFileClient is rebuilt with the
    // snapshot ID so subsequent calls read the snapshot, not the live file.
    @Test
    public void listRangesSnapshot() throws IOException {
        String fileName = generatePathName();
        primaryFileClient.create(1024);
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
        primaryFileClient = fileBuilderHelper(shareName, filePath)
            .snapshot(snapInfo.getSnapshot())
            .buildFileClient();
        primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
            assertEquals(0, it.getStart());
            assertEquals(511, it.getEnd());
        });
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
    // listRanges against a fabricated (non-existent) snapshot ID must fail with a storage exception.
    @Test
    public void listRangesSnapshotFail() throws IOException {
        String fileName = generateShareName();
        primaryFileClient.create(1024);
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        primaryFileClient = fileBuilderHelper(shareName, filePath)
            .snapshot("2020-08-07T16:58:02.0000000Z")
            .buildFileClient();
        assertThrows(ShareStorageException.class, () ->
            primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
                assertEquals(0, it.getStart());
                assertEquals(511, it.getEnd());
            }));
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
    // listRanges via an OAuth (token-intent BACKUP) client reports the single full range.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void listRangesOAuth() throws IOException {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String fileName = generatePathName();
        ShareFileClient fileClient = dirClient.getFileClient(fileName);
        fileClient.create(Constants.KB);
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        fileClient.uploadFromFile(uploadFile);
        fileClient.listRanges().forEach(it -> {
            assertEquals(0, it.getStart());
            assertEquals(1023, it.getEnd());
        });
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
primaryFileClient.create(4 * Constants.MB);
primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(4 * Constants.MB)),
4 * Constants.MB);
String snapshotId = primaryFileServiceClient.getShareClient(primaryFileClient.getShareName())
.createSnapshot()
.getSnapshot();
rangesToUpdate.forEach(it -> {
long size = it.getEnd() - it.getStart() + 1;
primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(new ByteArrayInputStream(
FileShareTestHelper.getRandomBuffer((int) size)), size).setOffset(it.getStart()), null, null);
});
rangesToClear.forEach(it -> {
long size = it.getEnd() - it.getStart() + 1;
primaryFileClient.clearRangeWithResponse(size, it.getStart(), null, null);
});
ShareFileRangeList rangeDiff = primaryFileClient.listRangesDiff(snapshotId);
assertEquals(expectedRanges.size(), rangeDiff.getRanges().size());
assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size());
for (int i = 0; i < expectedRanges.size(); i++) {
FileRange actualRange = rangeDiff.getRanges().get(i);
FileRange expectedRange = expectedRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
for (int i = 0; i < expectedClearRanges.size(); i++) {
ClearRange actualRange = rangeDiff.getClearRanges().get(i);
ClearRange expectedRange = expectedClearRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
}
    // listRangesDiff via an OAuth (token-intent BACKUP) client: snapshot, apply updates/clears,
    // then compare the reported diff against the expected range lists.
    // NOTE(review): rangesToUpdate/rangesToClear/expectedRanges/expectedClearRanges all come from
    // the same createFileRanges() helper — presumably it returns a deterministic fixture; the
    // expected lists are assumed to match what the updates/clears produce.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void listRangesDiffOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
        fileClient.create(Constants.KB);
        fileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
            Constants.KB);
        // Snapshot taken before the updates/clears so the diff captures exactly them.
        String snapshotId = primaryFileServiceClient.getShareClient(fileClient.getShareName())
            .createSnapshot()
            .getSnapshot();
        List<FileRange> rangesToUpdate = FileShareTestHelper.createFileRanges();
        List<FileRange> rangesToClear = FileShareTestHelper.createFileRanges();
        List<FileRange> expectedRanges = FileShareTestHelper.createFileRanges();
        List<FileRange> expectedClearRanges = FileShareTestHelper.createFileRanges();
        rangesToUpdate.forEach(it -> {
            long size = it.getEnd() - it.getStart() + 1;
            fileClient.uploadWithResponse(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) size)),
                size, it.getStart(), null, null);
        });
        rangesToClear.forEach(it -> {
            long size = it.getEnd() - it.getStart() + 1;
            fileClient.clearRangeWithResponse(size, it.getStart(), null, null);
        });
        ShareFileRangeList rangeDiff = fileClient.listRangesDiff(snapshotId);
        assertEquals(expectedRanges.size(), rangeDiff.getRanges().size());
        assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size());
        for (int i = 0; i < expectedRanges.size(); i++) {
            FileRange actualRange = rangeDiff.getRanges().get(i);
            FileRange expectedRange = expectedRanges.get(i);
            assertEquals(expectedRange.getStart(), actualRange.getStart());
            assertEquals(expectedRange.getEnd(), actualRange.getEnd());
        }
        for (int i = 0; i < expectedClearRanges.size(); i++) {
            ClearRange actualRange = rangeDiff.getClearRanges().get(i);
            FileRange expectedRange = expectedClearRanges.get(i);
            assertEquals(expectedRange.getStart(), actualRange.getStart());
            assertEquals(expectedRange.getEnd(), actualRange.getEnd());
        }
    }
    // listRangesDiff restricted to a sub-range [1025, 1026] of a post-snapshot upload that
    // started at offset 1024: only the requested slice is reported.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
    @Test
    public void listRangesDiffWithRange() throws IOException {
        String fileName = generateShareName();
        primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
        primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
            DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null);
        FileRange range = primaryFileClient.listRangesDiffWithResponse(
            new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025, 1026L)), null,
            null).getValue().getRanges().get(0);
        assertEquals(1025, range.getStart());
        assertEquals(1026, range.getEnd());
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
    // listRangesDiff with a valid lease ID succeeds and reports the post-snapshot upload
    // at [1024, 1030] (7 default data bytes starting at offset 1024).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
    @Test
    public void listRangesDiffLease() throws IOException {
        String fileName = generateShareName();
        primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
        primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
            DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null);
        String leaseId = createLeaseClient(primaryFileClient).acquireLease();
        FileRange range = primaryFileClient.listRangesDiffWithResponse(
            new ShareFileListRangesDiffOptions(snapInfo.getSnapshot())
                .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)), null, null)
            .getValue().getRanges().get(0);
        assertEquals(1024, range.getStart());
        assertEquals(1030, range.getEnd());
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
    // listRangesDiff against a trailing-dot file name; local client shadows the field on purpose.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void listRangesDiffTrailingDot() throws IOException {
        ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        String fileNameWithDot = generateShareName() + ".";
        primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileNameWithDot);
        primaryFileClient.uploadFromFile(uploadFile);
        ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
        ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
            DATA.getDefaultDataSizeLong()).setOffset(1024L);
        primaryFileClient.uploadRangeWithResponse(options, null, null);
        FileRange range = primaryFileClient.listRangesDiffWithResponse(
            new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025L, 1026L)), null,
            null).getValue().getRanges().get(0);
        assertEquals(1025, range.getStart());
        assertEquals(1026, range.getEnd());
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileNameWithDot);
    }
    // listRangesDiff with a random (non-matching) lease ID must be rejected by the service.
    @Test
    public void listRangesDiffLeaseFail() throws IOException {
        String fileName = generateShareName();
        primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
        primaryFileClient.uploadWithResponse(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), 1024L, null,
            null);
        assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse(
            new ShareFileListRangesDiffOptions(snapInfo.getSnapshot())
                .setRequestConditions(new ShareRequestConditions()
                    .setLeaseId(testResourceNamer.randomUuid())), null, null).getValue().getRanges().get(0));
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
    // listRangesDiff against a fabricated snapshot ID must fail with a storage exception.
    @Test
    public void listRangesDiffFail() throws IOException {
        String fileName = generateShareName();
        primaryFileClient.create(1024);
        String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
        primaryFileClient.uploadFromFile(uploadFile);
        assertThrows(ShareStorageException.class, () ->
            primaryFileClient.listRangesDiffWithResponse(
                new ShareFileListRangesDiffOptions("2020-08-07T16:58:02.0000000Z"), null, null).getValue().getRanges()
                .get(0));
        FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
    }
@Test
public void listHandles() {
primaryFileClient.create(1024);
assertEquals(0, primaryFileClient.listHandles().stream().count());
}
@Test
public void listHandlesWithMaxResult() {
primaryFileClient.create(1024);
assertEquals(0, primaryFileClient.listHandles(2, null, null).stream().count());
}
    // listHandles against a trailing-dot file name; local client shadows the field on purpose.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void listHandlesTrailingDot() {
        ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        primaryFileClient.create(1024);
        assertEquals(0, primaryFileClient.listHandles().stream().count());
    }
    // listHandles via an OAuth (token-intent BACKUP) client returns no handles for a fresh file.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void listHandlesOAuth() {
        ShareServiceClient oAuthServiceClient =
            getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
        fileClient.create(Constants.KB);
        assertEquals(0, fileClient.listHandles().stream().count());
    }
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03")
@Test
public void listHandlesAccessRights() {
ShareClient shareClient = primaryFileServiceClient.getShareClient("myshare");
ShareDirectoryClient directoryClient = shareClient.getDirectoryClient("mydirectory");
ShareFileClient fileClient = directoryClient.getFileClient("myfile");
List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList());
assertEquals(list.get(0).getAccessRights().get(0), ShareFileHandleAccessRights.WRITE);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03")
@Test
public void forceCloseHandleMin() {
primaryFileClient.create(512);
CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1");
assertEquals(0, handlesClosedInfo.getClosedHandles());
assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@Test
public void forceCloseHandleInvalidHandleID() {
primaryFileClient.create(512);
assertThrows(ShareStorageException.class, () -> primaryFileClient.forceCloseHandle("invalidHandleId"));
}
    // forceCloseHandle against a trailing-dot file name; local client shadows the field on purpose.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void forceCloseHandleTrailingDot() {
        ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        primaryFileClient.create(512);
        CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1");
        assertEquals(0, handlesClosedInfo.getClosedHandles());
        assertEquals(0, handlesClosedInfo.getFailedHandles());
    }
    // forceCloseHandle via an OAuth (token-intent BACKUP) client: closing a non-open handle is a no-op.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void forceCloseHandleOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
        fileClient.create(512);
        CloseHandlesInfo handlesClosedInfo = fileClient.forceCloseHandle("1");
        assertEquals(0, handlesClosedInfo.getClosedHandles());
        assertEquals(0, handlesClosedInfo.getFailedHandles());
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
@Test
public void forceCloseAllHandlesMin() {
primaryFileClient.create(512);
CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseAllHandles(null, null);
assertEquals(0, handlesClosedInfo.getClosedHandles());
assertEquals(0, handlesClosedInfo.getFailedHandles());
}
    // Minimal rename: rename() on an existing file returns a non-null destination client.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void renameMin() {
        primaryFileClient.create(512);
        assertNotNull(primaryFileClient.rename(generatePathName()));
    }
    // Rename with zero-width / BOM Unicode characters in both names: the special character must
    // survive the rename and appear (URL-encoded) in the destination URL.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(strings = {"\u200B", "\u200C", "\u200D", "\uFEFF"})
    public void renameWithUnicodeChars(String specialChar) {
        ShareFileClient fileClient = shareClient.getFileClient("test-file-source" + specialChar + " pdf.txt");
        fileClient.create(512);
        ShareFileClient destClient = fileClient.rename("test-file-destination" + specialChar + " pdf.txt");
        assertNotNull(destClient);
        assertTrue(Utility.urlEncode(destClient.getFileUrl()).contains(Utility.urlEncode(specialChar)));
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameWithResponse() {
primaryFileClient.create(512);
Response<ShareFileClient> resp = primaryFileClient.renameWithResponse(
new ShareFileRenameOptions(generatePathName()), null, null);
ShareFileClient renamedClient = resp.getValue();
assertNotNull(renamedClient.getProperties());
assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties());
}
    // Rename authorized by a share-scoped SAS token (read/write/create/delete): the destination
    // exists and carries the requested path.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12")
    @Test
    public void renameSasToken() {
        ShareFileSasPermission permissions = new ShareFileSasPermission()
            .setReadPermission(true)
            .setWritePermission(true)
            .setCreatePermission(true)
            .setDeletePermission(true);
        OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
        ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
        String sas = shareClient.generateSas(sasValues);
        ShareFileClient client = getFileClient(sas, primaryFileClient.getFileUrl());
        primaryFileClient.create(1024);
        String fileName = generatePathName();
        ShareFileClient destClient = client.rename(fileName);
        assertNotNull(destClient.getProperties());
        assertEquals(fileName, destClient.getFilePath());
    }
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameDifferentDirectory() {
primaryFileClient.create(512);
ShareDirectoryClient dc = shareClient.getDirectoryClient(generatePathName());
dc.create();
ShareFileClient destinationPath = dc.getFileClient(generatePathName());
ShareFileClient resultClient = primaryFileClient.rename(destinationPath.getFilePath());
assertTrue(destinationPath.exists());
assertEquals(destinationPath.getFilePath(), resultClient.getFilePath());
}
    // Renaming onto an existing destination succeeds only when replaceIfExists is set; otherwise
    // the service rejects it (the exception flag mirrors the parameter).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void renameReplaceIfExists(boolean replaceIfExists) {
        primaryFileClient.create(512);
        ShareFileClient destination = shareClient.getFileClient(generatePathName());
        destination.create(512);
        boolean exception = false;
        try {
            primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath())
                .setReplaceIfExists(replaceIfExists), null, null);
        } catch (ShareStorageException ignored) {
            exception = true;
        }
        assertEquals(replaceIfExists, !exception);
    }
    // Overwriting a READ_ONLY destination (replaceIfExists=true) succeeds only when
    // ignoreReadOnly is also set; otherwise the rename throws.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void renameIgnoreReadOnly(boolean ignoreReadOnly) {
        primaryFileClient.create(512);
        FileSmbProperties props = new FileSmbProperties()
            .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY));
        ShareFileClient destinationFile = shareClient.getFileClient(generatePathName());
        destinationFile.createWithResponse(512L, null, props, null, null, null, null, null);
        boolean exception = false;
        try {
            primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destinationFile.getFilePath())
                .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true), null, null);
        } catch (ShareStorageException ignored) {
            exception = true;
        }
        assertEquals(exception, !ignoreReadOnly);
    }
    // Passing a raw SDDL permission string on rename: the destination ends up with a
    // service-assigned file permission key.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void renameFilePermission() {
        primaryFileClient.create(512);
        String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
        ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName())
            .setFilePermission(filePermission), null, null).getValue();
        assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey());
    }
    // Supplying BOTH a permission string and a permission key on rename is invalid and must throw.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void renameFilePermissionAndKeySet() {
        primaryFileClient.create(512);
        String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
        assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse(
            new ShareFileRenameOptions(generatePathName())
                .setFilePermission(filePermission)
                .setSmbProperties(new FileSmbProperties().setFilePermissionKey("filePermissionkey")), null, null)
            .getValue());
    }
// Renaming with explicit SMB properties should carry attributes and timestamps to the destination.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void renameFileSmbProperties() {
primaryFileClient.create(512);
String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
// Register the permission on the share first, then reference it by key.
String permissionKey = shareClient.createPermission(filePermission);
OffsetDateTime fileCreationTime = testResourceNamer.now().minusDays(5);
OffsetDateTime fileLastWriteTime = testResourceNamer.now().minusYears(2);
OffsetDateTime fileChangeTime = testResourceNamer.now();
FileSmbProperties smbProperties = new FileSmbProperties()
.setFilePermissionKey(permissionKey)
.setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY))
.setFileCreationTime(fileCreationTime)
.setFileLastWriteTime(fileLastWriteTime)
.setFileChangeTime(fileChangeTime);
ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName())
.setSmbProperties(smbProperties), null, null).getValue();
ShareFileProperties destProperties = destClient.getProperties();
assertEquals(destProperties.getSmbProperties().getNtfsFileAttributes(), EnumSet.of(NtfsFileAttributes.ARCHIVE,
NtfsFileAttributes.READ_ONLY));
assertNotNull(destProperties.getSmbProperties().getFileCreationTime());
assertNotNull(destProperties.getSmbProperties().getFileLastWriteTime());
// Change time is compared with reduced precision because the service truncates it.
FileShareTestHelper.compareDatesWithPrecision(destProperties.getSmbProperties().getFileChangeTime(),
fileChangeTime);
}
// Metadata supplied with the rename should replace the metadata on the destination file.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameMetadata() {
primaryFileClient.create(512);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
Response<ShareFileClient> resp = primaryFileClient.renameWithResponse(
new ShareFileRenameOptions(generatePathName()).setMetadata(updatedMetadata), null, null);
ShareFileClient renamedClient = resp.getValue();
ShareFileProperties getPropertiesAfter = renamedClient.getProperties();
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
// When allowTrailingDot is enabled, a file name ending in '.' can be renamed to another dotted name.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void renameTrailingDot() {
// Share client configured with allowTrailingDot=true for both source and destination.
shareClient = getShareClient(shareName, true, true)
ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
ShareFileClient primaryFileClient = rootDirectory.getFileClient(generatePathName() + ".");
primaryFileClient.create(1024);
Response<ShareFileClient> response = primaryFileClient
.renameWithResponse(new ShareFileRenameOptions(generatePathName() + "."), null, null);
FileShareTestHelper.assertResponseStatusCode(response, 200);
}
// Renaming a file that was never created must fail with a ShareStorageException.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameError() {
primaryFileClient = shareClient.getFileClient(generatePathName());
assertThrows(ShareStorageException.class, () -> primaryFileClient.rename(generatePathName()));
}
// A valid source lease id supplied as a source access condition should allow the rename.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameSourceAC() {
primaryFileClient.create(512);
String leaseID = setupFileLeaseCondition(primaryFileClient, RECEIVED_LEASE_ID);
ShareRequestConditions src = new ShareRequestConditions()
.setLeaseId(leaseID);
FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse(
new ShareFileRenameOptions(generatePathName()).setSourceRequestConditions(src), null, null), 200);
}
// A stale/garbage lease id in the source access conditions must fail the rename.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameSourceACFail() {
primaryFileClient.create(512);
// Acquire a real lease on the file; the request below deliberately uses a different (garbage) id.
setupFileLeaseCondition(primaryFileClient, GARBAGE_LEASE_ID);
ShareRequestConditions src = new ShareRequestConditions()
.setLeaseId(GARBAGE_LEASE_ID);
assertThrows(ShareStorageException.class, () ->
primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName())
.setSourceRequestConditions(src), null, null));
}
// Renaming over an existing, leased destination succeeds when the matching lease id
// is supplied in the destination access conditions (with replaceIfExists).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameDestAC() {
    primaryFileClient.create(512);
    String destPath = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(destPath);
    destFile.create(512);
    String leaseId = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID);
    ShareRequestConditions destConditions = new ShareRequestConditions().setLeaseId(leaseId);

    ShareFileRenameOptions options = new ShareFileRenameOptions(destPath)
        .setDestinationRequestConditions(destConditions)
        .setReplaceIfExists(true);
    FileShareTestHelper.assertResponseStatusCode(
        primaryFileClient.renameWithResponse(options, null, null), 200);
}
// A garbage lease id in the destination access conditions must fail the rename.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameDestACFail() {
    primaryFileClient.create(512);
    String destPath = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(destPath);
    destFile.create(512);
    // Take a real lease on the destination; the request below supplies a non-matching id.
    setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID);
    ShareRequestConditions destConditions = new ShareRequestConditions().setLeaseId(GARBAGE_LEASE_ID);

    // NOTE(review): the rename is issued from destFile onto its own path (not from
    // primaryFileClient) — confirm this mirrors the intended scenario.
    ShareFileRenameOptions options = new ShareFileRenameOptions(destPath)
        .setDestinationRequestConditions(destConditions)
        .setReplaceIfExists(true);
    assertThrows(RuntimeException.class, () -> destFile.renameWithResponse(options, null, null));
}
// The content type supplied with the rename should be applied to the destination file.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void renameContentType() {
primaryFileClient.create(512);
Response<ShareFileClient> resp = primaryFileClient.renameWithResponse(
new ShareFileRenameOptions(generatePathName()).setContentType("mytype"), null, null);
ShareFileClient renamedClient = resp.getValue();
ShareFileProperties props = renamedClient.getProperties();
assertEquals(props.getContentType(), "mytype");
}
// Rename should work through an OAuth (token-credential) client with BACKUP intent;
// the old path must no longer resolve afterwards.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
fileClient.create(512);
String fileRename = generatePathName();
Response<ShareFileClient> resp = fileClient.renameWithResponse(new ShareFileRenameOptions(fileRename), null,
null);
ShareFileClient renamedClient = resp.getValue();
// Succeeds only if the renamed file actually exists.
renamedClient.getProperties();
assertEquals(fileRename, renamedClient.getFilePath());
// The original path should now be gone.
assertThrows(ShareStorageException.class, fileClient::getProperties);
}
// A client built with a snapshot id should report that id via getShareSnapshotId().
@Test
public void getSnapshotId() {
String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1,
1, 1), ZoneOffset.UTC).toString();
ShareFileClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot)
.buildFileClient();
assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
// getShareName() should echo the share the client was built for.
@Test
public void getShareName() {
assertEquals(shareName, primaryFileClient.getShareName());
}
// getFilePath() should echo the path the client was built for.
@Test
public void getFilePath() {
assertEquals(filePath, primaryFileClient.getFilePath());
}
// Supplies file names containing percent signs and non-ASCII characters to verify
// they are NOT double-encoded by the client.
// NOTE(review): no visible @ParameterizedTest in this chunk declares a String parameter
// fed by this supplier — confirm the consuming test was not lost/misannotated.
private static Stream<Arguments> getNonEncodedFileNameSupplier() {
return Stream.of(
Arguments.of("test%test"),
Arguments.of("%Россия 한국 中国!"),
Arguments.of("%E6%96%91%E9%BB%9E"),
Arguments.of("斑點")
);
}
// Verifies a per-call policy can override the service version header on a request.
// NOTE(review): this method takes no parameters, so the previous
// @ParameterizedTest/@MethodSource("getNonEncodedFileNameSupplier") pairing would fail
// with a JUnit argument-count mismatch; run it as a plain @Test instead.
@Test
public void perCallPolicy() {
    primaryFileClient.create(512);
    // Rebuild the client with a policy that pins the x-ms-version of each call.
    ShareFileClient fileClient = fileBuilderHelper(primaryFileClient.getShareName(),
        primaryFileClient.getFilePath()).addPolicy(getPerCallVersionPolicy()).buildFileClient();
    Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(null, null);
    assertEquals(response.getHeaders().getValue(X_MS_VERSION), "2017-11-09");
}
@Test
public void defaultAudience() {
String fileName = generatePathName();
ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
fileClient.create(Constants.KB);
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(null) /* should default to "https:
ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
assertTrue(aadFileClient.exists());
}
// An account-scoped audience for the correct account should authenticate successfully.
@Test
public void storageAccountAudience() {
String fileName = generatePathName();
ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
fileClient.create(Constants.KB);
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience(shareClient.getAccountName())));
ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
assertTrue(aadFileClient.exists());
}
// An audience scoped to the wrong account must fail authentication.
@Test
public void audienceError() {
String fileName = generatePathName();
ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
fileClient.create(Constants.KB);
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience("badAudience")));
ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
ShareStorageException e = assertThrows(ShareStorageException.class, aadFileClient::exists);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
String fileName = generatePathName();
ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
fileClient.create(Constants.KB);
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience));
ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
assertTrue(aadFileClient.exists());
}
/* Uncomment this test when Client Name is enabled with STG 93.
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04")
@Test
public void listHandlesClientName() {
ShareClient client = primaryFileServiceClient.getShareClient("testing");
ShareDirectoryClient directoryClient = client.getDirectoryClient("dir1");
ShareFileClient fileClient = directoryClient.getFileClient("test.txt");
List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList());
assertNotNull(list.get(0).getClientName());
}
*/
}

class FileApiTests extends FileShareTestBase {
private ShareFileClient primaryFileClient; // client under test, recreated per test in setup()
private ShareClient shareClient; // parent share for the test file
private String shareName; // unique share name generated per test
private String filePath; // unique file path generated per test
private static Map<String, String> testMetadata; // sample metadata used by create tests
private static ShareFileHttpHeaders httpHeaders; // sample HTTP headers used by create tests
private FileSmbProperties smbProperties; // baseline SMB properties (NORMAL attribute)
// Well-known SDDL permission string reused by the file-permission tests.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
// Creates a fresh share and file client plus reusable fixtures before each test.
@BeforeEach
public void setup() {
shareName = generateShareName();
filePath = generatePathName();
shareClient = shareBuilderHelper(shareName).buildClient();
shareClient.create();
// The file itself is NOT created here; individual tests create it with their own size.
primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
testMetadata = Collections.singletonMap("testmetadata", "value");
httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
.setContentType("application/octet-stream");
smbProperties = new FileSmbProperties()
.setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL));
}
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String fileURL = primaryFileClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
@Test
public void getShareSnapshotURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot();
expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot();
ShareFileClient newFileClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot())
.buildClient().getFileClient(filePath);
String fileURL = newFileClient.getFileUrl();
assertEquals(expectURL, fileURL);
String snapshotEndpoint = String.format("https:
shareName, filePath, shareSnapshotInfo.getSnapshot());
ShareFileClient client = getFileClient(StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint);
assertEquals(client.getFileUrl(), snapshotEndpoint);
}
// exists() returns true once the file has been created.
@Test
public void exists() {
primaryFileClient.create(Constants.KB);
assertTrue(primaryFileClient.exists());
}
// exists() returns false for a file that was never created.
@Test
public void doesNotExist() {
assertFalse(primaryFileClient.exists());
}
// exists() with invalid credentials should surface a 403 rather than returning false.
@Test
public void existsError() {
primaryFileClient = fileBuilderHelper(shareName, filePath)
.sasToken("sig=dummyToken").buildFileClient();
ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.exists());
assertEquals(e.getResponse().getStatusCode(), 403);
}
// Basic create with only a size should return 201 Created.
@Test
public void createFile() {
FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(1024, null, null, null, null,
null, null), 201);
}
// Files up to the 4 TB service maximum can be created.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void createFile4TB() {
FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(4 * Constants.TB, null, null,
null, null, null,
null), 201);
}
// A negative size is rejected by the service with OUT_OF_RANGE_INPUT (400).
@Test
public void createFileError() {
ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.create(-1));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
}
// Create with a pre-registered file-permission KEY should populate all SMB properties.
@Test
public void createFileWithArgsFpk() {
// Register the SDDL permission on the share to obtain a key.
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
Response<ShareFileInfo> resp = primaryFileClient
.createWithResponse(1024, httpHeaders, smbProperties, null, testMetadata, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Create with a raw SDDL permission STRING should also populate all SMB properties.
@Test
public void createFileWithArgsFp() {
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileInfo> resp = primaryFileClient.createWithResponse(1024, httpHeaders, smbProperties,
FILE_PERMISSION, testMetadata, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 201);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// An explicit file change time supplied at create should round-trip (within service precision).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void createChangeTime() {
OffsetDateTime changeTime = testResourceNamer.now();
primaryFileClient.createWithResponse(512, null, new FileSmbProperties().setFileChangeTime(changeTime),
null, null, null, null, null);
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties()
.getFileChangeTime(), changeTime);
}
// Creating a file via an OAuth (BACKUP intent) client should succeed and report consistent metadata.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void createFileOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
String fileName = generatePathName();
ShareFileClient fileClient = dirClient.getFileClient(fileName);
Response<ShareFileInfo> result = fileClient.createWithResponse(Constants.KB, null, null, null, null, null,
null);
assertEquals(fileClient.getShareName(), shareName);
// Path is "<dir>/<file>"; the file name is the second segment.
String[] filePath = fileClient.getFilePath().split("/");
assertEquals(fileName, filePath[1]);
assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG));
}
// Invalid size with otherwise valid args still yields OUT_OF_RANGE_INPUT (400).
@Test
public void createFileWithArgsError() {
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryFileClient.createWithResponse(-1, null, null, null,
testMetadata, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
}
// Supplying conflicting or oversized permission inputs must fail client-side validation.
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createFilePermissionAndKeyError(String filePermissionKey, String permission) {
FileSmbProperties smbProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
assertThrows(IllegalArgumentException.class, () ->
primaryFileClient.createWithResponse(1024, null, smbProperties, permission, null, null, null));
}
// Case 1: both a permission key AND a raw permission (mutually exclusive).
// Case 2: a raw permission exceeding the 8 KiB service limit.
// NOTE(review): new String(byte[]) uses the platform charset; the decoded length only
// needs to exceed the limit, so this is tolerable — but StandardCharsets would be safer.
private static Stream<Arguments> permissionAndKeySupplier() {
return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION),
Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB))));
}
// With allowTrailingDot=false the service strips the trailing '.'; with true it is preserved.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void createFileTrailingDot(boolean allowTrailingDot) {
shareClient = getShareClient(shareName, allowTrailingDot, null);
ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
String fileName = generatePathName();
String fileNameWithDot = fileName + ".";
ShareFileClient fileClient = rootDirectory.getFileClient(fileNameWithDot);
fileClient.create(1024);
List<String> foundFiles = new ArrayList<>();
for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) {
foundFiles.add(fileRef.getName());
}
if (allowTrailingDot) {
assertEquals(fileNameWithDot, foundFiles.get(0));
} else {
assertEquals(fileName, foundFiles.get(0));
}
}
// Round-trip: upload the default payload as a range, download it, and verify headers and bytes.
@Test
public void uploadAndDownloadData() {
primaryFileClient.create(DATA.getDefaultDataSizeLong());
Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null,
null);
ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
// Full download may come back as 200 (whole file) or 206 (ranged).
assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Upload at offset 1 and ranged download of the same span should round-trip the payload.
@Test
public void uploadAndDownloadDataWithArgs() {
primaryFileClient.create(1024);
Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L),
null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient
.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null);
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Same upload/download round-trip as above, but through an OAuth (BACKUP intent) client.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadAndDownloadDataOAuth() {
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
String fileName = generatePathName();
ShareFileClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(DATA.getDefaultDataSizeLong());
Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null);
ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Round-trip via the (parallel-capable) upload path.
// NOTE(review): this body is currently identical to uploadAndDownloadData — it uses
// uploadRangeWithResponse, not the parallel upload API its name suggests; confirm intent.
@Test
public void parallelUploadAndDownloadData() {
primaryFileClient.create(DATA.getDefaultDataSizeLong());
Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()),
null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null,
null);
ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Offset upload + ranged download round-trip (parallel-named variant).
@Test
public void parallelUploadAndDownloadDataWithArgs() {
primaryFileClient.create(1024);
Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L),
null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null);
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Upload from a stream without an explicit length should buffer/measure internally and round-trip.
@Test
public void parallelUploadInputStreamNoLength() {
primaryFileClient.create(DATA.getDefaultDataSize());
primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
ByteArrayOutputStream os = new ByteArrayOutputStream();
primaryFileClient.download(os);
assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
}
// Any length that does not match the stream (zero, negative, off-by-one either way) must fail.
@Test
public void parallelUploadInputStreamBadLength() {
int[] lengths = new int[]{0, -100, DATA.getDefaultDataSize() - 1, DATA.getDefaultDataSize() + 1};
for (int length : lengths) {
// Re-create the file each iteration so every bad length starts from a clean state.
primaryFileClient.create(DATA.getDefaultDataSize());
assertThrows(Exception.class, () ->
primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
length), null, null));
}
}
// An upload through a pipeline that injects transient failures should still succeed via retries.
@Test
public void uploadSuccessfulRetry() {
primaryFileClient.create(DATA.getDefaultDataSize());
ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy());
clientWithFailure.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
ByteArrayOutputStream os = new ByteArrayOutputStream();
primaryFileClient.download(os);
assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
}
// uploadRange round-trip with full header verification.
@Test
public void uploadRangeAndDownloadData() {
primaryFileClient.create(DATA.getDefaultDataSizeLong());
Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null,
null);
ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// uploadRange at offset 1 + ranged download of the same span.
@Test
public void uploadRangeAndDownloadDataWithArgs() {
primaryFileClient.create(1024);
Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L),
null, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null);
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Download with all-null options: body must match the upload, metadata must be empty,
// and the optional content headers must be absent.
// Fix: the CoreUtils.isNullOrEmpty(...) result was previously computed and discarded —
// wrap it in assertTrue so the metadata check actually asserts.
@Test
public void downloadAllNull() {
    primaryFileClient.create(DATA.getDefaultDataSizeLong());
    primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse response = primaryFileClient.downloadWithResponse(stream, null, null, null);
    byte[] body = stream.toByteArray();
    ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
    assertArrayEquals(DATA.getDefaultBytes(), body);
    assertTrue(CoreUtils.isNullOrEmpty(headers.getMetadata()));
    assertNotNull(headers.getContentLength());
    assertNotNull(headers.getContentType());
    assertNull(headers.getContentMd5());
    assertNull(headers.getContentEncoding());
    assertNull(headers.getCacheControl());
    assertNull(headers.getContentDisposition());
    assertNull(headers.getContentLanguage());
}
// Downloading a 0- or 1-byte file returns exactly that many (zero-valued) bytes.
@ParameterizedTest
@ValueSource(ints = {0, 1})
public void downloadEmptyFile(int fileSize) {
primaryFileClient.create(fileSize);
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
primaryFileClient.download(outStream);
byte[] result = outStream.toByteArray();
assertEquals(result.length, fileSize);
if (fileSize > 0) {
// A freshly created (never written) file reads back as zeros.
assertEquals(0, result[0]);
}
}
/*
This is to test the appropriate integration of DownloadResponse, including setting the correct range values on
HttpGetterInfo.
*/
@Test
public void downloadWithRetryRange() {
/*
We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing
a retry per the DownloadRetryOptions. The next request should have the same range header, which was generated
from the count and offset values in HttpGetterInfo that was constructed on the initial call to download. We
don't need to check the data here, but we want to ensure that the correct range is set each time. This will
test the correction of a bug that was found which caused HttpGetterInfo to have an incorrect offset when it was
constructed in FileClient.download().
*/
primaryFileClient.create(DATA.getDefaultDataSizeLong());
primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
// The mock policy fails the response unless each retried request carries "bytes=2-6".
ShareFileClient fc2 = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
primaryFileClient.getFileUrl(), new MockRetryRangeResponsePolicy("bytes=2-6"));
ShareFileRange range = new ShareFileRange(2, 6L);
DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3);
RuntimeException e = assertThrows(RuntimeException.class,
() -> fc2.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileDownloadOptions()
.setRange(range).setRetryOptions(options), null, null));
/*
Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is
NOT thrown because the types would not match.
*/
assertInstanceOf(IOException.class, e.getCause());
}
// A download that fails transiently 5 times should still complete under the default retry policy.
@Test
public void downloadRetryDefault() {
primaryFileClient.create(DATA.getDefaultDataSizeLong());
primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
ShareFileClient failureClient = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
primaryFileClient.getFileUrl(), new MockFailureResponsePolicy(5));
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
failureClient.download(outStream);
String bodyStr = outStream.toString();
assertEquals(bodyStr, DATA.getDefaultText());
}
// A file whose name ends in '.' must round-trip upload/download when trailing-dot support is enabled
// (service version 2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void downloadTrailingDot() {
    ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    shareFileClient.create(DATA.getDefaultDataSizeLong());
    shareFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize());
    ByteArrayOutputStream outStream = new ByteArrayOutputStream();
    shareFileClient.download(outStream);
    String downloadedData = outStream.toString();
    assertEquals(downloadedData, DATA.getDefaultText());
}
// Downloads a file through an OAuth (token-intent BACKUP) client and verifies that the body and the
// download headers match the properties reported by getProperties().
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void downloadOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(DATA.getDefaultDataSizeLong());
    fileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    ShareFileProperties properties = fileClient.getProperties();
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse response = fileClient.downloadWithResponse(stream, null, null, null);
    byte[] body = stream.toByteArray();
    ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
    assertArrayEquals(body, DATA.getDefaultBytes());
    // Fix: the boolean result was previously discarded, so the metadata check asserted nothing.
    assertTrue(CoreUtils.isNullOrEmpty(headers.getMetadata()));
    assertEquals(headers.getContentLength(), properties.getContentLength());
    assertEquals(headers.getContentType(), properties.getContentType());
    assertEquals(headers.getContentMd5(), properties.getContentMd5());
    assertEquals(headers.getContentEncoding(), properties.getContentEncoding());
    assertEquals(headers.getCacheControl(), properties.getCacheControl());
    assertEquals(headers.getContentDisposition(), properties.getContentDisposition());
}
// Creates a 4 TB file and writes a small range at the very end, verifying large-offset uploads
// and ranged downloads work (service version 2020-02-10+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void uploadRange4TB() {
    long fileSize = 4 * Constants.TB;
    primaryFileClient.create(fileSize);
    Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())
            .setOffset(fileSize - DATA.getDefaultDataSizeLong()), null, null); /* Upload to end of file. */
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    // Read back only the tail range that was written.
    ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
        new ShareFileRange(fileSize - DATA.getDefaultDataSizeLong(), fileSize), true, null, null);
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
    assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
}
// Buffered upload must internally chunk payloads at or above the 4 MB max-put-range limit
// without throwing.
@ParameterizedTest
@ValueSource(longs = {
    4 * Constants.MB,
    5 * Constants.MB})
public void uploadBufferedRangeGreaterThanMaxPutRange(long length) {
    primaryFileClient.create(length);
    ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
    assertDoesNotThrow(() -> primaryFileClient.upload(data, length, null));
}
// uploadRange against a trailing-dot file name must succeed and the content length must
// round-trip through download (service version 2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeTrailingDot() {
    primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(DATA.getDefaultDataSizeLong());
    ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong());
    Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(options, null, null);
    ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(),
        null, null, null);
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    FileShareTestHelper.assertResponseStatusCode(downloadResponse, 200);
    assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
}
// uploadRange through an OAuth (token-intent BACKUP) client: verifies the upload succeeds and the
// subsequent download returns the same bytes plus fully-populated SMB file headers.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadRangeOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(DATA.getDefaultDataSizeLong());
    Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null);
    ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    // Full-body downloads may come back as 200 or 206 depending on how the service answers.
    assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
    assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}
// Buffered upload must succeed across a matrix of total sizes and chunk sizes
// (see bufferedUploadVariousPartitions supplier below for the combinations).
@ParameterizedTest
@MethodSource("bufferedUploadVariousPartitions")
public void bufferedUploadVariousPartitions(Long length, Long uploadChunkLength) {
    primaryFileClient.create(length);
    ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper
        .getRandomBuffer(Math.toIntExact(length)));
    assertNotNull(primaryFileClient.upload(data, length, new ParallelTransferOptions()
        .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
}
// Argument supplier for bufferedUploadVariousPartitions: pairs of (total length, chunk length),
// where a null chunk length exercises the client defaults.
private static Stream<Arguments> bufferedUploadVariousPartitions() {
    long oneKb = 1024L;
    long fourMb = 4L * Constants.MB;
    long twentyMb = 20L * Constants.MB;
    return Stream.of(
        Arguments.of(oneKb, null),
        Arguments.of(oneKb, oneKb),
        Arguments.of(oneKb, 256L),
        Arguments.of(fourMb, null),
        Arguments.of(fourMb, oneKb),
        Arguments.of(twentyMb, null),
        Arguments.of(twentyMb, fourMb));
}
// A buffered upload configured with a 20 MB chunk must fail because a single put-range call
// cannot exceed the service's 4 MB limit.
@Test
public void bufferedUploadErrorPartitionTooBig() {
    long length = 20 * Constants.MB;
    long uploadChunkLength = 20 * Constants.MB;
    primaryFileClient.create(length);
    ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
    assertThrows(Exception.class, () -> primaryFileClient.upload(data, length, new ParallelTransferOptions()
        .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
}
// uploadRangeWithResponse against a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
@Test
public void uploadDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class, () ->
        primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
            DATA.getDefaultDataSizeLong()).setOffset(1L), null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// Parallel upload against a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
@Test
public void parallelUploadDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class, () ->
        primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// uploadRange against a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
@Test
public void uploadRangeDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// An upload through a pipeline that injects transient failures must still land the full payload,
// verified by downloading through an untainted client.
@Test
public void uploadDataRetryOnTransientFailure() {
    ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
        primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy());
    primaryFileClient.create(1024);
    clientWithFailure.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    primaryFileClient.downloadWithResponse(os, new ShareFileRange(0, DATA.getDefaultDataSizeLong() - 1), null, null,
        null);
    assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
}
// clearRange(7) must zero the first 7 bytes of the file; the cleared region reads back as all zeros.
@Test
public void uploadAndClearRange() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    primaryFileClient.clearRange(7);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    // Range 0-6 inclusive is exactly the 7 cleared bytes.
    primaryFileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
    for (byte b : stream.toByteArray()) {
        assertEquals(0, b);
    }
}
// clearRangeWithResponse with an explicit length and offset must zero bytes 1-7; that span
// reads back as all zeros.
@Test
public void uploadAndClearRangeWithArgs() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    primaryFileClient.clearRangeWithResponse(7, 1, null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, 7L), false, null, null);
    for (byte b : stream.toByteArray()) {
        assertEquals(0, b);
    }
}
// clearRange against a trailing-dot file name must return 201 (service version 2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void clearRangeTrailingDot() {
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(DATA.getDefaultDataSizeLong());
    FileShareTestHelper.assertResponseStatusCode(primaryFileClient.clearRangeWithResponse(
        DATA.getDefaultDataSizeLong(), 0, null, null), 201);
}
// Same upload-then-clearRange round trip as uploadAndClearRange, but driven through an OAuth
// (token-intent BACKUP) client (service version 2021-04-10+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadAndClearRangeOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    fileClient.create(fullInfoString.length());
    fileClient.uploadRange(fullInfoData, fullInfoString.length());
    fileClient.clearRange(7);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    fileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
    for (byte b : stream.toByteArray()) {
        assertEquals(0, b);
    }
}
// Clearing a range past the end of the file must fail with 416 INVALID_RANGE.
@Test
public void clearRangeError() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    // 30 > file length (22), so the range is invalid.
    ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.clearRange(30));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
}
// A clear range whose offset + length extends past the end of the file must fail with 416 INVALID_RANGE.
@Test
public void clearRangeErrorArgs() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    ShareStorageException e = assertThrows(ShareStorageException.class, () ->
        primaryFileClient.clearRangeWithResponse(7, 20, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
}
// When the declared length disagrees with the stream's actual size, uploadRangeWithResponse must
// throw UnexpectedLengthException whose message says whether the stream had "more than" or
// "less than" the declared bytes.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(int size, String errMsg) {
    primaryFileClient.create(1024);
    UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
        () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
            DATA.getDefaultInputStream(), size), null, Context.NONE));
    assertTrue(e.getMessage().contains(errMsg));
}
// Supplier for the length-mismatch tests: the default payload is 7 bytes, so declaring 6 means
// the stream has "more than" the declared length and declaring 8 means it has "less than".
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
    return Stream.of(Arguments.of(6, "more than"), Arguments.of(8, "less than"));
}
// Same declared-length-vs-actual-length contract as uploadDataLengthMismatch, but for the
// parallel upload entry point.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void parallelUploadDataLengthMismatch(int size, String errMsg) {
    primaryFileClient.create(1024);
    UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
        () -> primaryFileClient.upload(DATA.getDefaultInputStream(), size, null));
    assertTrue(e.getMessage().contains(errMsg));
}
// Same declared-length-vs-actual-length contract as uploadDataLengthMismatch, but for uploadRange.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadRangeLengthMismatch(int size, String errMsg) {
    primaryFileClient.create(1024);
    UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
        () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), size));
    assertTrue(e.getMessage().contains(errMsg));
}
// Downloading a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
@Test
public void downloadDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileRange(0, 1023L),
            false, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// uploadFromFile with a path that does not exist on disk must surface an UncheckedIOException
// caused by NoSuchFileException.
@Test
public void uploadFileDoesNotExist() {
    File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
    if (uploadFile.exists()) {
        // Fix: a bare Java `assert` is a no-op unless the JVM runs with -ea; use a JUnit assertion
        // so a failed pre-condition cleanup actually fails the test.
        assertTrue(uploadFile.delete());
    }
    UncheckedIOException e = assertThrows(UncheckedIOException.class,
        () -> primaryFileClient.uploadFromFile(uploadFile.getPath()));
    assertInstanceOf(NoSuchFileException.class, e.getCause());
    // Best-effort cleanup; the file should not exist at this point.
    uploadFile.delete();
}
/*
 * Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
 */
@LiveOnly
@ParameterizedTest
@ValueSource(ints = {
    0,
    20,
    16 * 1024 * 1024,
    8 * 1026 * 1024 + 10,
    50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
    // Build a fresh client from the connection string so the default HttpClient is exercised.
    ShareServiceClient shareServiceClient = new ShareServiceClientBuilder()
        .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
        .buildClient();
    ShareFileClient fileClient = shareServiceClient.getShareClient(shareName)
        .createFile(filePath, fileSize);
    File file = FileShareTestHelper.getRandomFile(fileSize);
    fileClient.uploadFromFile(file.toPath().toString());
    File outFile = new File(generatePathName() + ".txt");
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    fileClient.downloadToFile(outFile.toPath().toString());
    // Downloaded content must match the uploaded fixture byte-for-byte.
    assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
    // Cleanup of the share and the local scratch files.
    shareServiceClient.deleteShare(shareName);
    outFile.delete();
    file.delete();
}
// downloadToFile must refuse to overwrite an existing local file, surfacing
// FileAlreadyExistsException wrapped in UncheckedIOException.
@Test
public void uploadAndDownloadFileExists() throws IOException {
    String data = "Download file exists";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (!downloadFile.exists()) {
        // Pre-condition: the destination file must already exist on disk.
        assertTrue(downloadFile.createNewFile());
    }
    primaryFileClient.create(data.length());
    primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
        data.length());
    UncheckedIOException e = assertThrows(UncheckedIOException.class,
        () -> primaryFileClient.downloadToFile(downloadFile.getPath()));
    assertInstanceOf(FileAlreadyExistsException.class, e.getCause());
    downloadFile.delete();
}
// downloadToFile must create the destination file when it does not exist and write the full content.
@Test
public void uploadAndDownloadToFileDoesNotExist() throws IOException {
    String data = "Download file DoesNotExist";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (downloadFile.exists()) {
        // Fix: the original called createNewFile() here, which returns false for an existing file
        // and therefore always failed the assertion. The pre-condition for this test is that the
        // destination does NOT exist, so delete any leftover file instead.
        assertTrue(downloadFile.delete());
    }
    primaryFileClient.create(data.length());
    primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
        data.length());
    primaryFileClient.downloadToFile(downloadFile.getPath());
    // \\Z reads the remainder of the file as a single token.
    Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z");
    assertEquals(data, scanner.next());
    scanner.close();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), downloadFile.getName());
}
// FileLastWrittenMode.PRESERVE must keep the file's last-write time across an uploadRange, while
// NOW must advance it (service version 2021-06-08+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void uploadRangePreserveFileLastWrittenOn() {
    FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
    for (FileLastWrittenMode mode : modes) {
        // Re-create the file each iteration so both modes start from a fresh timestamp baseline.
        primaryFileClient.create(Constants.KB);
        ShareFileProperties initialProps = primaryFileClient.getProperties();
        primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
            new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
            Constants.KB).setLastWrittenMode(mode), null, null);
        ShareFileProperties resultProps = primaryFileClient.getProperties();
        if (mode.equals(FileLastWrittenMode.PRESERVE)) {
            assertEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        } else {
            assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        }
    }
}
// Copies a 5-byte span from a SAS-authorized source file into a destination file via
// uploadRangeFromUrl, then verifies the copied bytes match the source at the shifted offsets.
// Currently disabled: the original groovy port did not exercise this path correctly.
@Disabled("the groovy test was not testing this test properly. need to investigate this test further.")
@ParameterizedTest
@ValueSource(strings = {
    "",
    "ü1ü" /* Something that needs to be url encoded. */
})
public void uploadRangeFromURL(String pathSuffix) {
    primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
    primaryFileClient.create(1024);
    String data = "The quick brown fox jumps over the lazy dog";
    int sourceOffset = 5;
    int length = 5;
    int destinationOffset = 0;
    primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length());
    // Read-only SAS on the source so the destination's server-side copy can fetch it.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileClient.getShareName())
        .setFilePath(primaryFileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileClient client = fileBuilderHelper(shareName, "destination" + pathSuffix)
        .endpoint(primaryFileClient.getFileUrl().toString())
        .buildFileClient();
    client.create(1024);
    client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileClient.getFileUrl() + "?"
        + sasToken);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    client.download(stream);
    String result = new String(stream.toByteArray());
    for (int i = 0; i < length; i++) {
        assertEquals(result.charAt(destinationOffset + i), data.charAt(sourceOffset + i));
    }
}
// uploadRangeFromUrl through an OAuth (token-intent BACKUP) destination client, sourcing from a
// SAS-authorized file; verifies the copy response, download headers, and first copied byte.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadRangeFromURLOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClientSharedKey(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(1024);
    String data = "The quick brown fox jumps over the lazy dog";
    int sourceOffset = 5;
    int length = 5;
    int destinationOffset = 0;
    fileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length());
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(fileClient.getShareName())
        .setFilePath(fileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    String fileNameDest = generatePathName();
    ShareFileClient fileClientDest = dirClient.getFileClient(fileNameDest);
    fileClientDest.create(1024);
    Response<ShareFileUploadRangeFromUrlInfo> uploadResponse = fileClientDest.uploadRangeFromUrlWithResponse(length,
        destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken, null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse downloadResponse = fileClientDest.downloadWithResponse(stream, null, null, null, null);
    ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
    assertEquals(headers.getContentLength(), 1024);
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    // 117 == 'u' — data.charAt(sourceOffset) of "The quick brown fox..." is 'u' (from "quick").
    assertEquals(stream.toByteArray()[0], 117);
}
// FileLastWrittenMode semantics for uploadRangeFromUrl: PRESERVE keeps the destination's
// last-write time (within timestamp precision), NOW advances it (service version 2021-06-08+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void uploadRangeFromUrlPreserveFileLastWrittenOn() {
    FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
    primaryFileClient.create(Constants.KB);
    ShareFileClient destinationClient = shareClient.getFileClient(generatePathName());
    destinationClient.create(Constants.KB);
    ShareFileProperties initialProps = destinationClient.getProperties();
    primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    // Read-only SAS so the destination can copy from the source server-side.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential
        .fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileClient.getShareName())
        .setFilePath(primaryFileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    for (FileLastWrittenMode mode : modes) {
        destinationClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(Constants.KB,
            primaryFileClient.getFileUrl() + "?" + sasToken).setLastWrittenMode(mode), null, null);
        ShareFileProperties resultProps = destinationClient.getProperties();
        if (mode.equals(FileLastWrittenMode.PRESERVE)) {
            assertTrue(FileShareTestHelper.compareDatesWithPrecision(
                initialProps.getSmbProperties().getFileLastWriteTime(),
                resultProps.getSmbProperties().getFileLastWriteTime()));
        } else {
            assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        }
    }
}
// uploadRangeFromUrl must succeed when both the source and destination file names end in '.'
// and trailing-dot support is enabled on both sides (service version 2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeFromUrlTrailingDot() {
    // Trailing-dot support enabled for both source and destination requests.
    shareClient = getShareClient(shareName, true, true);
    ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
    ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
    sourceClient.create(Constants.KB);
    ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
    destinationClient.create(Constants.KB);
    sourceClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    ShareFileSasPermission permissions = new ShareFileSasPermission()
        .setReadPermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setDeletePermission(true);
    OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
    ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
    String sasToken = shareClient.generateSas(sasValues);
    Response<ShareFileUploadRangeFromUrlInfo> res = destinationClient.uploadRangeFromUrlWithResponse(
        new ShareFileUploadRangeFromUrlOptions(Constants.KB, sourceClient.getFileUrl() + "?" + sasToken), null,
        null);
    FileShareTestHelper.assertResponseStatusCode(res, 201);
}
// With source trailing-dot support disabled, uploadRangeFromUrl against trailing-dot names must
// fail (the source URL's trailing dot is not honored) — expects ShareStorageException.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeFromUrlTrailingDotFail() {
    // allowTrailingDot=true, allowSourceTrailingDot=false — the mismatch is what should cause failure.
    shareClient = getShareClient(shareName, true, false);
    ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
    ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
    sourceClient.create(DATA.getDefaultDataSizeLong());
    ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
    destinationClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    assertThrows(ShareStorageException.class, () -> destinationClient
        .uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(DATA.getDefaultDataSizeLong(),
            sourceClient.getFileUrl()), null, null));
}
// openInputStream with an explicit range (5-10 inclusive) must yield exactly 6 readable bytes.
@Test
public void openInputStreamWithRange() throws IOException {
    primaryFileClient.create(1024);
    ShareFileRange shareFileRange = new ShareFileRange(5L, 10L);
    byte[] dataBytes = "long test string".getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream inputStreamData = new ByteArrayInputStream(dataBytes);
    primaryFileClient.upload(inputStreamData, dataBytes.length, null);
    int totalBytesRead = 0;
    StorageFileInputStream stream = primaryFileClient.openInputStream(shareFileRange);
    while (stream.read() != -1) {
        totalBytesRead++;
    }
    stream.close();
    // Range 5-10 is inclusive at both ends: 6 bytes.
    assertEquals(6, totalBytesRead);
}
// beginCopy of a file onto itself (plain and URL-encodable names) must start and yield a copy id.
@ParameterizedTest
@ValueSource(strings = {
    "",
    "ü1ü" /* Something that needs to be url encoded. */
})
public void startCopy(String pathSuffix) {
    primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, new ShareFileCopyOptions(),
        null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, smbProperties,
setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly,
setArchiveAttribute, null, null, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
}
// beginCopy between two trailing-dot file names must complete successfully when trailing-dot
// support is enabled for both source and destination (service version 2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void startCopyTrailingDot() {
    shareClient = getShareClient(shareName, true, true);
    ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + ".");
    sourceClient.create(1024);
    ShareFileClient destClient = shareClient.getFileClient(generatePathName() + ".");
    destClient.create(1024);
    byte[] data = FileShareTestHelper.getRandomBuffer(Constants.KB);
    ByteArrayInputStream inputStream = new ByteArrayInputStream(data);
    sourceClient.uploadRange(inputStream, Constants.KB);
    SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceClient.getFileUrl(),
        new ShareFileCopyOptions(), null);
    poller.waitForCompletion();
    assertEquals(poller.poll().getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
// With source trailing-dot support disabled, beginCopy between trailing-dot names must fail with
// 404 RESOURCE_NOT_FOUND (the source URL's trailing dot is stripped, so the source isn't found).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void startCopyTrailingDotFail() {
    // allowTrailingDot=true, allowSourceTrailingDot=false.
    shareClient = getShareClient(shareName, true, false);
    ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + ".");
    sourceClient.create(1024);
    ShareFileClient destClient = shareClient.getFileClient(generatePathName() + ".");
    destClient.create(1024);
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize());
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// beginCopy with a malformed source URL must fail with 400 INVALID_HEADER_VALUE.
// Disabled due to a poller race: the first observed event can be missed between subscribe and observe.
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap between"
    + " the time subscribed and the time we start observing events.")
@Test
public void startCopyError() {
    primaryFileClient.create(1024);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy("some url", testMetadata, null);
    ShareStorageException e = assertThrows(ShareStorageException.class, poller::waitForCompletion);
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.INVALID_HEADER_VALUE);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(setFilePermission ? FILE_PERMISSION : null)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setPermissionCopyModeType(permissionType);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
// beginCopy with ignoreReadOnly and setArchiveAttribute both enabled must start and complete.
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setIgnoreReadOnly(true)
        .setArchiveAttribute(true);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}
// Verifies a copy with OVERRIDE permission mode applies the supplied SMB properties
// (creation time, last-write time, NTFS attributes) to the destination file.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsFilePermission() {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
    .setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now())
    .setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setSmbProperties(smbProperties)
    .setFilePermission(FILE_PERMISSION)
    .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
assertNotNull(pollResponse.getValue().getCopyId());
// NOTE(review): assertEquals arguments are (actual, expected) — reversed from JUnit convention.
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
// Service truncates timestamps, so compare with helper precision rather than equals.
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
    smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
    smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
// Verifies an explicitly-set file change time is preserved through a copy
// performed with OVERRIDE permission mode.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsChangeTime() {
// NOTE(review): local 'client' is never used; create(1024) is only needed for its side effect.
ShareFileInfo client = primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
smbProperties.setFileChangeTime(testResourceNamer.now());
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setSmbProperties(smbProperties)
    .setFilePermission(FILE_PERMISSION)
    .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
// NOTE(review): assertEquals arguments are (actual, expected) — reversed from JUnit convention.
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
    primaryFileClient.getProperties().getSmbProperties().getFileChangeTime());
}
// Verifies a copy with OVERRIDE mode applies SMB properties referenced by a
// created file-permission key (rather than a literal permission string).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
    .setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now())
    .setNtfsFileAttributes(ntfs)
    .setFilePermissionKey(filePermissionKey);
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setSmbProperties(smbProperties)
    .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
assertNotNull(pollResponse.getValue().getCopyId());
// NOTE(review): assertEquals arguments are (actual, expected) — reversed from JUnit convention.
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
    smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
    smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
// Verifies beginCopy succeeds against a destination guarded by a valid lease id.
@Test
public void startCopyWithOptionLease() {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileClient).acquireLease();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setDestinationRequestConditions(conditions);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
// Fixed argument order: JUnit expects (expected, actual) so failure messages read correctly.
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}
// Verifies beginCopy fails when the destination lease condition carries a random
// (non-acquired) lease id.
@Test
public void startCopyWithOptionsInvalidLease() {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
// A freshly generated UUID that was never acquired as a lease on the file.
String leaseId = testResourceNamer.randomUuid();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setDestinationRequestConditions(conditions);
assertThrows(ShareStorageException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null));
}
// Verifies beginCopy succeeds when destination metadata is supplied via options.
@Test
public void startCopyWithOptionsMetadata() {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setMetadata(testMetadata);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
// Fixed argument order: JUnit expects (expected, actual) so failure messages read correctly.
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}
// Verifies that when all four copyable SMB properties (created/last-written/changed
// timestamps and file attributes) are requested from the source, the destination
// ends up with the source's original values.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileClient.create(1024);
// Capture the source's properties up front to compare against after the copy.
ShareFileProperties initialProperties = primaryFileClient.getProperties();
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileClient).acquireLease();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
    .setCreatedOn(true)
    .setLastWrittenOn(true)
    .setChangedOn(true)
    .setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setDestinationRequestConditions(conditions)
    .setSmbPropertiesToCopy(list);
SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
ShareFileProperties resultProperties = primaryFileClient.getProperties();
assertNotNull(pollResponse.getValue().getCopyId());
// NOTE(review): assertEquals arguments are (actual, expected) — reversed from JUnit convention.
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getSmbProperties()
    .getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getSmbProperties()
    .getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getSmbProperties()
    .getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getSmbProperties().getNtfsFileAttributes());
}
// Verifies beginCopy rejects (IllegalArgumentException, client-side) requests that
// both supply explicit SMB properties/permission AND ask to copy the same
// properties from the source — the two are mutually exclusive.
// NOTE(review): the @MethodSource value below looks truncated (missing the
// "#supplierMethod" suffix and closing quote) — confirm against the original source.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
    boolean fileAttributes) {
primaryFileClient.create(1024);
String sourceURL = primaryFileClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
    .setCreatedOn(createdOn)
    .setLastWrittenOn(lastWrittenOn)
    .setChangedOn(changedOn)
    .setFileAttributes(fileAttributes);
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now())
    .setFileChangeTime(testResourceNamer.now())
    .setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
    .setSmbProperties(smbProperties)
    .setFilePermission(FILE_PERMISSION)
    .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
    .setSmbPropertiesToCopy(list);
assertThrows(IllegalArgumentException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null));
}
// Verifies beginCopy works through an OAuth-authenticated client with BACKUP token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void startCopyOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
    .getDirectoryClient(generatePathName());
dirClient.create();
ShareFileClient sourceClient = dirClient.getFileClient(generatePathName());
sourceClient.create(DATA.getDefaultDataSizeLong());
// NOTE(review): destClient is created but the copy below targets sourceClient itself
// (copies the file onto itself) — confirm whether destClient.beginCopy was intended.
ShareFileClient destClient = dirClient.getFileClient(generatePathName());
destClient.create(DATA.getDefaultDataSizeLong());
sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
String sourceURL = sourceClient.getFileUrl();
SyncPoller<ShareFileCopyInfo, Void> poller = sourceClient.beginCopy(sourceURL, new ShareFileCopyOptions(),
    null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse.getValue().getCopyId());
}
// Verifies abortCopy: for a small (1 MB) file the copy completes before abort is
// issued, so the abort is expected to fail with ShareStorageException.
@Test
public void abortCopy() {
int fileSize = Constants.MB;
byte[] bytes = new byte[fileSize];
ByteArrayInputStream data = new ByteArrayInputStream(bytes);
ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
primaryFileClient.create(fileSize);
primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
String sourceURL = primaryFileClient.getFileUrl();
ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
dest.create(fileSize);
SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse);
assertNotNull(pollResponse.getValue());
// Copy already finished, so aborting it is rejected by the service.
assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}
// Verifies abortCopy with a valid lease condition: the small copy completes first,
// so abort still fails with ShareStorageException (see abortCopy).
@Test
public void abortCopyLease() {
int fileSize = Constants.MB;
byte[] bytes = new byte[fileSize];
ByteArrayInputStream data = new ByteArrayInputStream(bytes);
ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
primaryFileClient.create(fileSize);
primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
String sourceURL = primaryFileClient.getFileUrl();
ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
dest.create(fileSize);
// NOTE(review): lease is acquired on the source, but passed as the destination's
// request condition — confirm this matches the intended scenario.
String leaseId = createLeaseClient(primaryFileClient).acquireLease();
ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(
    sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse);
assertNotNull(pollResponse.getValue());
assertThrows(ShareStorageException.class, () -> dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(),
    requestConditions, null, null));
}
// Verifies that using a random (never-acquired) lease id anywhere in the
// beginCopy/abortCopy flow fails with ShareStorageException.
@Test
public void abortCopyInvalidLease() {
int fileSize = Constants.MB;
byte[] bytes = new byte[fileSize];
ByteArrayInputStream data = new ByteArrayInputStream(bytes);
ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
primaryFileClient.create(fileSize);
primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
String sourceURL = primaryFileClient.getFileUrl();
ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
dest.create(fileSize);
String leaseId = testResourceNamer.randomUuid();
ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
// The invalid lease may already fail beginCopy; the whole flow is wrapped so the
// exception is accepted wherever it surfaces.
assertThrows(ShareStorageException.class, () -> {
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(
        sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(),
        requestConditions, null, null);
});
}
// Verifies the abortCopy flow for file names ending in a trailing dot
// (allowTrailingDot support, 2022-11-02+). As in abortCopy, the small copy
// completes first so abort fails with ShareStorageException.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void abortCopyTrailingDot() {
ByteArrayInputStream data = new ByteArrayInputStream(new byte[Constants.MB]);
String fileName = generatePathName() + ".";
ShareFileClient primaryFileClient = getFileClient(shareName, fileName, true, null);
primaryFileClient.create(Constants.MB);
primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
String sourceURL = primaryFileClient.getFileUrl();
ShareFileClient dest = fileBuilderHelper(shareName, fileName).buildFileClient();
dest.create(Constants.MB);
SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse);
assertNotNull(pollResponse.getValue());
assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}
// Verifies the abortCopy flow through an OAuth client with BACKUP token intent.
// The small copy completes first, so abort fails with ShareStorageException.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void abortCopyOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
    .getDirectoryClient(generatePathName());
dirClient.create();
String fileName = generatePathName();
ShareFileClient sourceClient = dirClient.getFileClient(fileName);
sourceClient.create(DATA.getDefaultDataSizeLong());
sourceClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
String sourceURL = sourceClient.getFileUrl();
ShareFileClient destClient = dirClient.getFileClient(generatePathName());
destClient.create(DATA.getDefaultDataSizeLong());
SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
assertNotNull(pollResponse);
assertNotNull(pollResponse.getValue());
assertThrows(ShareStorageException.class, () -> destClient.abortCopy(pollResponse.getValue().getCopyId()));
}
// Aborting a copy with an id that never belonged to a copy operation must fail.
@Test
public void abortCopyError() {
final String bogusCopyId = "randomId";
assertThrows(ShareStorageException.class,
    () -> primaryFileClient.abortCopy(bogusCopyId));
}
// Verifies deleting an existing file returns HTTP 202 Accepted.
@Test
public void deleteFile() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteWithResponse(null, null), 202);
}
// Verifies delete works for file names ending in a trailing dot (2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteFileTrailingDot() {
ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
shareFileClient.create(1024);
FileShareTestHelper.assertResponseStatusCode(shareFileClient.deleteWithResponse(null, null), 202);
}
// Verifies delete works through an OAuth client with BACKUP token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteFileOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
String fileName = generatePathName();
ShareFileClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(Constants.KB);
FileShareTestHelper.assertResponseStatusCode(fileClient.deleteWithResponse(null, null), 202);
}
// Deleting a file that was never created must fail with 404 ResourceNotFound.
@Test
public void deleteFileError() {
ShareStorageException exception = assertThrows(ShareStorageException.class,
    () -> primaryFileClient.deleteWithResponse(null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(exception, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
// Verifies deleteIfExists on an existing file returns 202 Accepted.
@Test
public void deleteIfExistsFile() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteIfExistsWithResponse(null, null, null),
    202);
}
// Minimal deleteIfExists smoke test (no response inspection).
@Test
public void deleteIfExistsFileMin() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
primaryFileClient.deleteIfExists();
}
// Verifies deleteIfExists on a non-existent file returns false with a 404 response
// and does not throw.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
ShareFileClient client = shareClient.getFileClient(generateShareName());
Response<Boolean> response = client.deleteIfExistsWithResponse(null, null, null);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertFalse(client.exists());
}
// Verifies the second deleteIfExists on the same file reports false (already gone).
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
assertTrue(primaryFileClient.deleteIfExists());
assertFalse(primaryFileClient.deleteIfExists());
}
// Verifies getProperties returns 200 with a fully-populated SMB property set.
@Test
public void getProperties() {
primaryFileClient.create(1024);
// NOTE(review): these smbProperties setters have no effect on the assertions below
// (the properties object is never sent to the service in this test) — confirm intent.
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileProperties> resp = primaryFileClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Verifies getProperties works for file names ending in a trailing dot (2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
shareFileClient.create(1024);
// NOTE(review): these smbProperties setters have no effect on the assertions below — confirm intent.
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileProperties> resp = shareFileClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Verifies getProperties via OAuth matches the values returned at creation time.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
    .getDirectoryClient(generatePathName());
dirClient.create();
ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
ShareFileInfo createInfo = fileClient.create(Constants.KB);
ShareFileProperties properties = fileClient.getProperties();
assertEquals(createInfo.getETag(), properties.getETag());
assertEquals(createInfo.getLastModified(), properties.getLastModified());
assertEquals(createInfo.getSmbProperties().getFilePermissionKey(),
    properties.getSmbProperties().getFilePermissionKey());
assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(),
    properties.getSmbProperties().getNtfsFileAttributes());
assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(),
    properties.getSmbProperties().getFileLastWriteTime());
assertEquals(createInfo.getSmbProperties().getFileCreationTime(),
    properties.getSmbProperties().getFileCreationTime());
assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
    properties.getSmbProperties().getFileChangeTime());
assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId());
assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId());
}
// getProperties on a file that was never created must surface ResourceNotFound.
@Test
public void getPropertiesError() {
ShareStorageException exception =
    assertThrows(ShareStorageException.class, primaryFileClient::getProperties);
assertTrue(exception.getMessage().contains("ResourceNotFound"));
}
// Verifies setProperties with HTTP headers + SMB properties referencing a created
// file-permission key returns 200 and a fully-populated SMB property set.
@Test
public void setHttpHeadersFpk() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now())
    .setFilePermissionKey(filePermissionKey);
Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
    null, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Verifies setProperties with HTTP headers + a literal file-permission string
// returns 200 and a fully-populated SMB property set.
@Test
public void setHttpHeadersFp() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now());
Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
    FILE_PERMISSION, null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
assertNotNull(resp.getValue().getETag());
assertNotNull(resp.getValue().getLastModified());
assertNotNull(resp.getValue().getSmbProperties());
assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(resp.getValue().getSmbProperties().getParentId());
assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
// Verifies setProperties round-trips an explicit file change time (2021-06-08+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void setHttpHeadersChangeTime() {
primaryFileClient.create(512);
OffsetDateTime changeTime = testResourceNamer.now();
primaryFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null);
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties()
    .getFileChangeTime(), changeTime);
}
// Same change-time round trip, for a file name with a trailing dot (2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setHttpHeadersTrailingDot() {
ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
shareFileClient.create(1024);
OffsetDateTime changeTime = testResourceNamer.now();
shareFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null);
FileShareTestHelper.compareDatesWithPrecision(shareFileClient.getProperties().getSmbProperties()
    .getFileChangeTime(), changeTime);
}
// Verifies setProperties applies HTTP headers (content type/disposition/cache
// control/encoding/language) through an OAuth client with BACKUP token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
dirClient.create();
ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
fileClient.create(Constants.KB);
httpHeaders = new ShareFileHttpHeaders()
    .setContentType("application/octet-stream")
    .setContentDisposition("attachment")
    .setCacheControl("no-transform")
    .setContentEncoding("gzip")
    .setContentLanguage("en");
Response<ShareFileInfo> res = fileClient.setPropertiesWithResponse(Constants.KB, httpHeaders, null, null, null,
    null);
ShareFileProperties properties = fileClient.getProperties();
FileShareTestHelper.assertResponseStatusCode(res, 200);
assertNotNull(res.getValue().getETag());
assertEquals(res.getValue().getETag(), res.getHeaders().getValue(HttpHeaderName.ETAG));
// Fixed argument order: JUnit expects (expected, actual) so failure messages read correctly.
assertEquals("application/octet-stream", properties.getContentType());
assertEquals("attachment", properties.getContentDisposition());
assertEquals("no-transform", properties.getCacheControl());
assertEquals("gzip", properties.getContentEncoding());
assertNull(properties.getContentMd5());
}
// Verifies setProperties rejects a negative file size with 400 OutOfRangeInput.
@Test
public void setHttpHeadersError() {
primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
ShareStorageException e = assertThrows(ShareStorageException.class, () ->
    primaryFileClient.setPropertiesWithResponse(-1, null, null, null, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
}
// Verifies setMetadata replaces the creation-time metadata with the new map.
@Test
public void setMetadata() {
primaryFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareFileProperties getPropertiesBefore = primaryFileClient.getProperties();
Response<ShareFileMetadataInfo> setPropertiesResponse = primaryFileClient
    .setMetadataWithResponse(updatedMetadata, null, null);
ShareFileProperties getPropertiesAfter = primaryFileClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
// Same metadata replacement, for a file name with a trailing dot (2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setMetadataTrailingDot() {
ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
shareFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null);
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareFileProperties getPropertiesBefore = shareFileClient.getProperties();
Response<ShareFileMetadataInfo> setPropertiesResponse = shareFileClient.setMetadataWithResponse(updatedMetadata,
    null, null);
ShareFileProperties getPropertiesAfter = shareFileClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
// Verifies setMetadata through an OAuth client with BACKUP token intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setMetadataOAuth() {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
    .getDirectoryClient(generatePathName());
dirClient.create();
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
fileClient.createWithResponse(Constants.KB, null, null, null, testMetadata, null, null);
ShareFileProperties getPropertiesBefore = fileClient.getProperties();
Response<ShareFileMetadataInfo> setPropertiesResponse = fileClient.setMetadataWithResponse(updatedMetadata,
    null, null);
ShareFileProperties getPropertiesAfter = fileClient.getProperties();
assertEquals(testMetadata, getPropertiesBefore.getMetadata());
FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
// A metadata key must be non-empty; the service rejects it with 400 EmptyMetadataKey.
@Test
public void setMetadataError() {
primaryFileClient.create(1024);
Map<String, String> metadataWithEmptyKey = Collections.singletonMap("", "value");
ShareStorageException exception = assertThrows(ShareStorageException.class,
    () -> primaryFileClient.setMetadataWithResponse(metadataWithEmptyKey, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(exception, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
// Verifies listRanges reports the single fully-written 0..1023 range after an upload.
@Test
public void listRanges() throws IOException {
String fileName = generatePathName();
primaryFileClient.create(1024);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
primaryFileClient.listRanges().forEach(it -> {
    assertEquals(0, it.getStart());
    assertEquals(1023, it.getEnd());
});
// Clean up the local temp file regardless of assertion outcome is NOT guaranteed here;
// cleanup only runs if the assertions above pass.
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Same single-range check for a file name with a trailing dot (2022-11-02+).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listRangesTrailingDot() throws IOException {
ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
primaryFileClient.create(1024);
String fileName = generatePathName() + ".";
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
primaryFileClient.listRanges().forEach(it -> {
    assertEquals(0, it.getStart());
    assertEquals(1023, it.getEnd());
});
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Verifies listRanges restricted to bytes 0..511 reports exactly that sub-range.
@Test
public void listRangesWithRange() throws IOException {
String fileName = generatePathName();
primaryFileClient.create(1024);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
    assertEquals(0, it.getStart());
    assertEquals(511, it.getEnd());
});
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Verifies listRanges works against a share snapshot taken after the upload.
@Test
public void listRangesSnapshot() throws IOException {
String fileName = generatePathName();
primaryFileClient.create(1024);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
// Rebuild the client pointed at the snapshot rather than the live share.
primaryFileClient = fileBuilderHelper(shareName, filePath)
    .snapshot(snapInfo.getSnapshot())
    .buildFileClient();
primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
    assertEquals(0, it.getStart());
    assertEquals(511, it.getEnd());
});
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Verifies listRanges fails with ShareStorageException when pointed at a
// snapshot timestamp that does not exist.
@Test
public void listRangesSnapshotFail() throws IOException {
// NOTE(review): uses generateShareName() for a local file name while sibling tests
// use generatePathName() — confirm this inconsistency is intentional.
String fileName = generateShareName();
primaryFileClient.create(1024);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileClient.uploadFromFile(uploadFile);
primaryFileClient = fileBuilderHelper(shareName, filePath)
    .snapshot("2020-08-07T16:58:02.0000000Z")
    .buildFileClient();
assertThrows(ShareStorageException.class, () ->
    primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
        assertEquals(0, it.getStart());
        assertEquals(511, it.getEnd());
    }));
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void listRangesOAuth() throws IOException {
ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create();
String fileName = generatePathName();
ShareFileClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(Constants.KB);
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
fileClient.uploadFromFile(uploadFile);
fileClient.listRanges().forEach(it -> {
assertEquals(0, it.getStart());
assertEquals(1023, it.getEnd());
});
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
primaryFileClient.create(4 * Constants.MB);
primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(4 * Constants.MB)),
4 * Constants.MB);
String snapshotId = primaryFileServiceClient.getShareClient(primaryFileClient.getShareName())
.createSnapshot()
.getSnapshot();
rangesToUpdate.forEach(it -> {
long size = it.getEnd() - it.getStart() + 1;
primaryFileClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(new ByteArrayInputStream(
FileShareTestHelper.getRandomBuffer((int) size)), size).setOffset(it.getStart()), null, null);
});
rangesToClear.forEach(it -> {
long size = it.getEnd() - it.getStart() + 1;
primaryFileClient.clearRangeWithResponse(size, it.getStart(), null, null);
});
ShareFileRangeList rangeDiff = primaryFileClient.listRangesDiff(snapshotId);
assertEquals(expectedRanges.size(), rangeDiff.getRanges().size());
assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size());
for (int i = 0; i < expectedRanges.size(); i++) {
FileRange actualRange = rangeDiff.getRanges().get(i);
FileRange expectedRange = expectedRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
for (int i = 0; i < expectedClearRanges.size(); i++) {
ClearRange actualRange = rangeDiff.getClearRanges().get(i);
ClearRange expectedRange = expectedClearRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void listRangesDiffOAuth() {
    // listRangesDiff exercised through an OAuth client: snapshot, apply updates/clears, then
    // compare the diff against the expected range lists.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(Constants.KB);
    fileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    String snapshotId = primaryFileServiceClient.getShareClient(fileClient.getShareName())
        .createSnapshot()
        .getSnapshot();
    // NOTE(review): all four lists come from the same createFileRanges() helper — confirm the
    // helper is deterministic and that reusing it for updates, clears, and expectations is intended.
    List<FileRange> rangesToUpdate = FileShareTestHelper.createFileRanges();
    List<FileRange> rangesToClear = FileShareTestHelper.createFileRanges();
    List<FileRange> expectedRanges = FileShareTestHelper.createFileRanges();
    List<FileRange> expectedClearRanges = FileShareTestHelper.createFileRanges();
    rangesToUpdate.forEach(it -> {
        long size = it.getEnd() - it.getStart() + 1;
        fileClient.uploadWithResponse(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) size)),
            size, it.getStart(), null, null);
    });
    rangesToClear.forEach(it -> {
        long size = it.getEnd() - it.getStart() + 1;
        fileClient.clearRangeWithResponse(size, it.getStart(), null, null);
    });
    ShareFileRangeList rangeDiff = fileClient.listRangesDiff(snapshotId);
    assertEquals(expectedRanges.size(), rangeDiff.getRanges().size());
    assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size());
    for (int i = 0; i < expectedRanges.size(); i++) {
        FileRange actualRange = rangeDiff.getRanges().get(i);
        FileRange expectedRange = expectedRanges.get(i);
        assertEquals(expectedRange.getStart(), actualRange.getStart());
        assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
    for (int i = 0; i < expectedClearRanges.size(); i++) {
        ClearRange actualRange = rangeDiff.getClearRanges().get(i);
        // Expected clear ranges are held as FileRange here (unlike listRangesDiff above).
        FileRange expectedRange = expectedClearRanges.get(i);
        assertEquals(expectedRange.getStart(), actualRange.getStart());
        assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void listRangesDiffWithRange() throws IOException {
    // A range filter on listRangesDiff should clip the reported diff to [1025, 1026].
    String fileName = generateShareName();
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    // Write past the snapshotted region so a diff exists.
    primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null);
    FileRange range = primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025, 1026L)), null,
        null).getValue().getRanges().get(0);
    assertEquals(1025, range.getStart());
    assertEquals(1026, range.getEnd());
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void listRangesDiffLease() throws IOException {
    // listRangesDiff succeeds when the correct lease id is supplied on a leased file.
    String fileName = generateShareName();
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null);
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    FileRange range = primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot())
            .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)), null, null)
        .getValue().getRanges().get(0);
    assertEquals(1024, range.getStart());
    assertEquals(1030, range.getEnd());
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listRangesDiffTrailingDot() throws IOException {
    // Same diff/range-filter scenario against a trailing-dot file name.
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    String fileNameWithDot = generateShareName() + ".";
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileNameWithDot);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong()).setOffset(1024L);
    primaryFileClient.uploadRangeWithResponse(options, null, null);
    FileRange range = primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025L, 1026L)), null,
        null).getValue().getRanges().get(0);
    assertEquals(1025, range.getStart());
    assertEquals(1026, range.getEnd());
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileNameWithDot);
}
@Test
public void listRangesDiffLeaseFail() throws IOException {
    // A bogus lease id on a leased file must make listRangesDiff throw.
    String fileName = generateShareName();
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    primaryFileClient.uploadWithResponse(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), 1024L, null,
        null);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot())
            .setRequestConditions(new ShareRequestConditions()
                .setLeaseId(testResourceNamer.randomUuid())), null, null).getValue().getRanges().get(0));
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesDiffFail() throws IOException {
    // A nonexistent snapshot id must make listRangesDiff throw.
    String fileName = generateShareName();
    primaryFileClient.create(1024);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    assertThrows(ShareStorageException.class, () ->
        primaryFileClient.listRangesDiffWithResponse(
            new ShareFileListRangesDiffOptions("2020-08-07T16:58:02.0000000Z"), null, null).getValue().getRanges()
            .get(0));
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listHandles() {
    // A freshly created file has no open handles.
    primaryFileClient.create(1024);
    assertEquals(0, primaryFileClient.listHandles().stream().count());
}
@Test
public void listHandlesWithMaxResult() {
    // maxResultsPerPage does not change the (empty) result for a new file.
    primaryFileClient.create(1024);
    assertEquals(0, primaryFileClient.listHandles(2, null, null).stream().count());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listHandlesTrailingDot() {
    // listHandles works on a trailing-dot file name.
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(1024);
    assertEquals(0, primaryFileClient.listHandles().stream().count());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void listHandlesOAuth() {
    // listHandles works through an OAuth client with BACKUP intent.
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(Constants.KB);
    assertEquals(0, fileClient.listHandles().stream().count());
}
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03")
@Test
public void listHandlesAccessRights() {
    // Playback-only: relies on a pre-recorded share/directory/file that has an open handle
    // with WRITE access rights.
    ShareClient shareClient = primaryFileServiceClient.getShareClient("myshare");
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient("mydirectory");
    ShareFileClient fileClient = directoryClient.getFileClient("myfile");
    List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList());
    // Fixed: expected value goes first in JUnit assertEquals (was swapped).
    assertEquals(ShareFileHandleAccessRights.WRITE, list.get(0).getAccessRights().get(0));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03")
@Test
public void forceCloseHandleMin() {
    // Closing handle "1" on a file with no open handles is a no-op (0 closed, 0 failed).
    primaryFileClient.create(512);
    CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1");
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@Test
public void forceCloseHandleInvalidHandleID() {
    // A malformed handle id is rejected by the service.
    primaryFileClient.create(512);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.forceCloseHandle("invalidHandleId"));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void forceCloseHandleTrailingDot() {
    // forceCloseHandle works on a trailing-dot file name.
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(512);
    CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1");
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void forceCloseHandleOAuth() {
    // forceCloseHandle works through an OAuth client with BACKUP intent.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(512);
    CloseHandlesInfo handlesClosedInfo = fileClient.forceCloseHandle("1");
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
@Test
public void forceCloseAllHandlesMin() {
    // forceCloseAllHandles on a handle-free file closes nothing and fails nothing.
    primaryFileClient.create(512);
    CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseAllHandles(null, null);
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameMin() {
    // Minimal rename: a created file can be renamed and a client for the new path is returned.
    primaryFileClient.create(512);
    assertNotNull(primaryFileClient.rename(generatePathName()));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(strings = {"\u200B", "\u200C", "\u200D", "\uFEFF"})
public void renameWithUnicodeChars(String specialChar) {
    // Zero-width/BOM characters in source and destination names must survive the rename
    // and appear (URL-encoded) in the destination client's URL.
    ShareFileClient fileClient = shareClient.getFileClient("test-file-source" + specialChar + " pdf.txt");
    fileClient.create(512);
    ShareFileClient destClient = fileClient.rename("test-file-destination" + specialChar + " pdf.txt");
    assertNotNull(destClient);
    assertTrue(Utility.urlEncode(destClient.getFileUrl()).contains(Utility.urlEncode(specialChar)));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameWithResponse() {
    // After rename the new client resolves properties and the old path no longer exists.
    primaryFileClient.create(512);
    Response<ShareFileClient> resp = primaryFileClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName()), null, null);
    ShareFileClient renamedClient = resp.getValue();
    assertNotNull(renamedClient.getProperties());
    assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12")
@Test
public void renameSasToken() {
    // Rename authorized by a share-scoped SAS with read/write/create/delete permissions.
    ShareFileSasPermission permissions = new ShareFileSasPermission()
        .setReadPermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setDeletePermission(true);
    OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
    ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
    String sas = shareClient.generateSas(sasValues);
    ShareFileClient client = getFileClient(sas, primaryFileClient.getFileUrl());
    primaryFileClient.create(1024);
    String fileName = generatePathName();
    ShareFileClient destClient = client.rename(fileName);
    assertNotNull(destClient.getProperties());
    assertEquals(fileName, destClient.getFilePath());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameDifferentDirectory() {
    // A file can be renamed into a different (newly created) directory.
    primaryFileClient.create(512);
    ShareDirectoryClient dc = shareClient.getDirectoryClient(generatePathName());
    dc.create();
    ShareFileClient destinationPath = dc.getFileClient(generatePathName());
    ShareFileClient resultClient = primaryFileClient.rename(destinationPath.getFilePath());
    assertTrue(destinationPath.exists());
    assertEquals(destinationPath.getFilePath(), resultClient.getFilePath());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void renameReplaceIfExists(boolean replaceIfExists) {
    // Renaming onto an existing destination succeeds iff replaceIfExists is set.
    primaryFileClient.create(512);
    ShareFileClient destination = shareClient.getFileClient(generatePathName());
    destination.create(512);
    boolean exception = false;
    try {
        primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath())
            .setReplaceIfExists(replaceIfExists), null, null);
    } catch (ShareStorageException ignored) {
        exception = true;
    }
    // Throws exactly when replaceIfExists is false.
    assertEquals(replaceIfExists, !exception);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void renameIgnoreReadOnly(boolean ignoreReadOnly) {
    // Renaming onto a READ_ONLY destination succeeds iff ignoreReadOnly is set
    // (replaceIfExists is always true here so only the read-only flag is under test).
    primaryFileClient.create(512);
    FileSmbProperties props = new FileSmbProperties()
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY));
    ShareFileClient destinationFile = shareClient.getFileClient(generatePathName());
    destinationFile.createWithResponse(512L, null, props, null, null, null, null, null);
    boolean exception = false;
    try {
        primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destinationFile.getFilePath())
            .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true), null, null);
    } catch (ShareStorageException ignored) {
        exception = true;
    }
    // Fixed: expected value first (JUnit convention), matching sibling renameReplaceIfExists.
    assertEquals(!ignoreReadOnly, exception);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameFilePermission() {
    // Supplying a raw SDDL permission on rename results in a service-assigned permission key.
    primaryFileClient.create(512);
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName())
        .setFilePermission(filePermission), null, null).getValue();
    assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameFilePermissionAndKeySet() {
    // Setting both a raw permission and a permission key is mutually exclusive and must throw.
    primaryFileClient.create(512);
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName())
            .setFilePermission(filePermission)
            .setSmbProperties(new FileSmbProperties().setFilePermissionKey("filePermissionkey")), null, null)
        .getValue());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void renameFileSmbProperties() {
    // SMB properties (attributes and timestamps) supplied on rename are applied to the destination.
    primaryFileClient.create(512);
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    String permissionKey = shareClient.createPermission(filePermission);
    OffsetDateTime fileCreationTime = testResourceNamer.now().minusDays(5);
    OffsetDateTime fileLastWriteTime = testResourceNamer.now().minusYears(2);
    OffsetDateTime fileChangeTime = testResourceNamer.now();
    FileSmbProperties smbProperties = new FileSmbProperties()
        .setFilePermissionKey(permissionKey)
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY))
        .setFileCreationTime(fileCreationTime)
        .setFileLastWriteTime(fileLastWriteTime)
        .setFileChangeTime(fileChangeTime);
    ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName())
        .setSmbProperties(smbProperties), null, null).getValue();
    ShareFileProperties destProperties = destClient.getProperties();
    assertEquals(destProperties.getSmbProperties().getNtfsFileAttributes(), EnumSet.of(NtfsFileAttributes.ARCHIVE,
        NtfsFileAttributes.READ_ONLY));
    assertNotNull(destProperties.getSmbProperties().getFileCreationTime());
    assertNotNull(destProperties.getSmbProperties().getFileLastWriteTime());
    // Change time is compared with reduced precision to tolerate service rounding.
    FileShareTestHelper.compareDatesWithPrecision(destProperties.getSmbProperties().getFileChangeTime(),
        fileChangeTime);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameMetadata() {
    // Metadata supplied on rename replaces the destination's metadata.
    primaryFileClient.create(512);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    Response<ShareFileClient> resp = primaryFileClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName()).setMetadata(updatedMetadata), null, null);
    ShareFileClient renamedClient = resp.getValue();
    ShareFileProperties getPropertiesAfter = renamedClient.getProperties();
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void renameTrailingDot() {
    // Rename between two trailing-dot names with trailing-dot support enabled on the share client.
    shareClient = getShareClient(shareName, true, true);
    ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
    ShareFileClient primaryFileClient = rootDirectory.getFileClient(generatePathName() + ".");
    primaryFileClient.create(1024);
    Response<ShareFileClient> response = primaryFileClient
        .renameWithResponse(new ShareFileRenameOptions(generatePathName() + "."), null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameError() {
    // Renaming a file that was never created must throw.
    primaryFileClient = shareClient.getFileClient(generatePathName());
    assertThrows(ShareStorageException.class, () -> primaryFileClient.rename(generatePathName()));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameSourceAC() {
    // Rename succeeds when the source lease id matches the active lease.
    primaryFileClient.create(512);
    String leaseID = setupFileLeaseCondition(primaryFileClient, RECEIVED_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions()
        .setLeaseId(leaseID);
    FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName()).setSourceRequestConditions(src), null, null), 200);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameSourceACFail() {
    // Rename fails when the supplied source lease id does not match the active lease.
    primaryFileClient.create(512);
    setupFileLeaseCondition(primaryFileClient, GARBAGE_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions()
        .setLeaseId(GARBAGE_LEASE_ID);
    assertThrows(ShareStorageException.class, () ->
        primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName())
            .setSourceRequestConditions(src), null, null));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameDestAC() {
    // Rename over an existing, leased destination succeeds with the correct destination lease id.
    primaryFileClient.create(512);
    String pathName = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(pathName);
    destFile.create(512);
    String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions()
        .setLeaseId(leaseID);
    FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse(
        new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null,
        null), 200);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameDestACFail() {
    // Rename must fail when the destination lease id does not match.
    primaryFileClient.create(512);
    String pathName = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(pathName);
    destFile.create(512);
    setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions()
        .setLeaseId(GARBAGE_LEASE_ID);
    // NOTE(review): this renames destFile onto its own path, whereas sibling renameDestAC renames
    // primaryFileClient — confirm the source client here is intended.
    assertThrows(RuntimeException.class, () -> destFile.renameWithResponse(new ShareFileRenameOptions(pathName)
        .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void renameContentType() {
    // A content type supplied on rename is applied to the destination file.
    primaryFileClient.create(512);
    Response<ShareFileClient> resp = primaryFileClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName()).setContentType("mytype"), null, null);
    ShareFileClient renamedClient = resp.getValue();
    ShareFileProperties props = renamedClient.getProperties();
    // Fixed: expected value goes first in JUnit assertEquals (was swapped).
    assertEquals("mytype", props.getContentType());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void renameOAuth() {
    // Rename through an OAuth client: the new path resolves and the old path is gone.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(512);
    String fileRename = generatePathName();
    Response<ShareFileClient> resp = fileClient.renameWithResponse(new ShareFileRenameOptions(fileRename), null,
        null);
    ShareFileClient renamedClient = resp.getValue();
    renamedClient.getProperties();
    assertEquals(fileRename, renamedClient.getFilePath());
    assertThrows(ShareStorageException.class, fileClient::getProperties);
}
@Test
public void getSnapshotId() {
    // A fixed timestamp acts as the snapshot identifier the client should echo back verbatim.
    String expectedSnapshotId = LocalDateTime.of(2000, 1, 1, 1, 1).atOffset(ZoneOffset.UTC).toString();
    ShareFileClient snapshotClient = fileBuilderHelper(shareName, filePath)
        .snapshot(expectedSnapshotId)
        .buildFileClient();
    assertEquals(expectedSnapshotId, snapshotClient.getShareSnapshotId());
}
@Test
public void getShareName() {
    // The client must report the share name it was constructed with.
    String actualShareName = primaryFileClient.getShareName();
    assertEquals(shareName, actualShareName);
}
@Test
public void getFilePath() {
    // The client must report the file path it was constructed with.
    String actualFilePath = primaryFileClient.getFilePath();
    assertEquals(filePath, actualFilePath);
}
private static Stream<Arguments> getNonEncodedFileNameSupplier() {
    // File names that must reach the service without client-side pre-encoding:
    // a literal percent, non-ASCII text, a percent-encoded-looking string, and CJK characters.
    return Stream.of("test%test", "%Россия 한국 中国!", "%E6%96%91%E9%BB%9E", "斑點")
        .map(Arguments::of);
}
// Fixed: this zero-argument test carried @ParameterizedTest/@MethodSource annotations
// (a JUnit configuration error — the source supplies a String the method cannot accept);
// it is a plain @Test.
@Test
public void perCallPolicy() {
    // A per-call policy overriding the service version must be reflected in the response header.
    primaryFileClient.create(512);
    ShareFileClient fileClient = fileBuilderHelper(primaryFileClient.getShareName(),
        primaryFileClient.getFilePath()).addPolicy(getPerCallVersionPolicy()).buildFileClient();
    Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(null, null);
    // Expected value first (JUnit convention).
    assertEquals("2017-11-09", response.getHeaders().getValue(X_MS_VERSION));
}
@Test
public void defaultAudience() {
String fileName = generatePathName();
ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
fileClient.create(Constants.KB);
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(null) /* should default to "https:
ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
assertTrue(aadFileClient.exists());
}
@Test
public void storageAccountAudience() {
    // An account-scoped audience built from the real account name must authenticate.
    String fileName = generatePathName();
    ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
    fileClient.create(Constants.KB);
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP)
            .audience(ShareAudience.createShareServiceAccountAudience(shareClient.getAccountName())));
    ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
    assertTrue(aadFileClient.exists());
}
@Test
public void audienceError() {
    // An audience for the wrong account must fail authentication with AUTHENTICATION_FAILED.
    String fileName = generatePathName();
    ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
    fileClient.create(Constants.KB);
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP)
            .audience(ShareAudience.createShareServiceAccountAudience("badAudience")));
    ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
    ShareStorageException e = assertThrows(ShareStorageException.class, aadFileClient::exists);
    assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
String fileName = generatePathName();
ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient();
fileClient.create(Constants.KB);
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience));
ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName);
assertTrue(aadFileClient.exists());
}
/* Uncomment this test when Client Name is enabled with STG 93.
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04")
@Test
public void listHandlesClientName() {
ShareClient client = primaryFileServiceClient.getShareClient("testing");
ShareDirectoryClient directoryClient = client.getDirectoryClient("dir1");
ShareFileClient fileClient = directoryClient.getFileClient("test.txt");
List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList());
assertNotNull(list.get(0).getClientName());
}
*/
} |
The URL does not need to contain the file name pre-encoded because when the specific API makes a service call, in the implementation methods there is a specific parameter called @PathParam that will handle encoding (through azure-core) if the encoded flag is not passed. For example, when calling fileClient.create(), it calls into this method: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/FilesImpl.java#L95 and will handle encoding the file names and urls appropriately through azure-core. | public void getNonEncodedFileName(String fileName) {
primaryDirectoryClient.create();
ShareFileClient fileClient = primaryDirectoryClient.getFileClient(fileName);
assertEquals(primaryDirectoryClient.getDirectoryPath() + "/" + fileName, fileClient.getFilePath());
fileClient.create(1024);
assertTrue(fileClient.exists());
} | assertEquals(primaryDirectoryClient.getDirectoryPath() + "/" + fileName, fileClient.getFilePath()); | public void getNonEncodedFileName(String fileName) {
primaryDirectoryClient.create();
ShareFileClient fileClient = primaryDirectoryClient.getFileClient(fileName);
assertEquals(primaryDirectoryClient.getDirectoryPath() + "/" + fileName, fileClient.getFilePath());
fileClient.create(1024);
assertTrue(fileClient.exists());
} | class DirectoryApiTests extends FileShareTestBase {
// Directory client under test, rebuilt per test in setup().
private ShareDirectoryClient primaryDirectoryClient;
// Share hosting the test directory.
private ShareClient shareClient;
// Path of the directory under test.
private String directoryPath;
// Name of the share created per test.
private String shareName;
// Single-entry metadata used by metadata-related tests.
private static Map<String, String> testMetadata;
// SMB properties template used by create/set-properties tests.
private FileSmbProperties smbProperties;
// Raw SDDL permission string used by permission-related tests.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
    // Fresh share + directory path per test; the share is created eagerly, the directory is not.
    shareName = generateShareName();
    directoryPath = generatePathName();
    shareClient = shareBuilderHelper(shareName).buildClient();
    shareClient.create();
    primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath).buildDirectoryClient();
    testMetadata = Collections.singletonMap("testmetadata", "value");
    smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getDirectoryUrl() {
// Verifies the directory client reports the expected endpoint URL for the account/share/path.
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
// NOTE(review): the format template below appears truncated in this view ("https:") —
// confirm the full URL template against the original source.
String expectURL = String.format("https:
directoryPath);
String directoryURL = primaryDirectoryClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
}
@Test
public void getShareSnapshotUrl() {
// A client built against a share snapshot must append the "sharesnapshot" query parameter
// to its directory URL, and a client built directly from a snapshot endpoint must echo it.
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
// NOTE(review): the two String.format templates below appear truncated in this view
// ("https:"); confirm the full URL templates against the original source.
String expectURL = String.format("https:
directoryPath);
ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot();
expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot();
ShareDirectoryClient newDirClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot())
.buildClient().getDirectoryClient(directoryPath);
String directoryURL = newDirClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
String snapshotEndpoint = String.format("https:
shareName, directoryPath, shareSnapshotInfo.getSnapshot());
ShareDirectoryClient client = getDirectoryClient(StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint);
assertEquals(client.getDirectoryUrl(), snapshotEndpoint);
}
@Test
public void getSubDirectoryClient() {
    // Resolving a child directory hands back a ShareDirectoryClient instance.
    ShareDirectoryClient child = primaryDirectoryClient.getSubdirectoryClient("testSubDirectory");
    assertInstanceOf(ShareDirectoryClient.class, child);
}
@Test
public void getFileClient() {
    // Resolving a child file hands back a ShareFileClient instance.
    ShareFileClient child = primaryDirectoryClient.getFileClient("testFile");
    assertInstanceOf(ShareFileClient.class, child);
}
private static Stream<Arguments> getNonEncodedFileNameSupplier() {
    // File names whose meaning would change if they were double-encoded:
    // literal percent signs, non-ASCII text, and percent-escape look-alikes.
    String[] names = {"test%test", "%Россия 한국 中国!", "%E6%96%91%E9%BB%9E", "斑點"};
    return Stream.of(names).map(Arguments::of);
}
// NOTE(review): the @ParameterizedTest/@MethodSource("getNonEncodedFileNameSupplier")
// annotations that sat here belong to getNonEncodedFileName(String) — that supplier's
// values match its parameter. A no-arg method must not carry both @ParameterizedTest and
// @Test: JUnit 5 rejects the combination and a parameterized run of a zero-arg method
// fails resolution. Keeping only @Test restores a runnable test.
@Test
public void exists() {
    // A directory reports existence only after it has been created.
    primaryDirectoryClient.create();
    assertTrue(primaryDirectoryClient.exists());
}
@Test
public void doesNotExist() {
    // setup() never creates the directory, so it must not exist yet.
    assertFalse(primaryDirectoryClient.exists());
}
@Test
public void existsError() {
    // An invalid SAS token must surface as 403 AUTHENTICATION_FAILED, not as "not found".
    primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath)
        .sasToken("sig=dummyToken").buildDirectoryClient();
    // BUG FIX: the previous code called assertThrows twice with the same lambda, issuing a
    // redundant second exists() request; a single call capturing the exception suffices.
    ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.exists());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 403, ShareErrorCode.AUTHENTICATION_FAILED);
}
@Test
public void createDirectory() {
    // A bare create — no SMB properties, permission, or metadata — answers 201 Created.
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createWithResponse(null, null, null, null, null);
    assertEquals(201, response.getStatusCode());
}
@Test
public void createDirectoryError() {
    // Creating a directory inside a share that was never created fails with SHARE_NOT_FOUND.
    String missingShare = generateShareName();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> directoryBuilderHelper(missingShare, directoryPath).buildDirectoryClient().create());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
@Test
public void createDirectoryWithMetadata() {
    // Metadata supplied at creation time is accepted; the service still answers 201.
    int status = primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null)
        .getStatusCode();
    assertEquals(201, status);
}
@Test
public void createDirectoryWithFilePermission() {
    // Creating with an explicit permission populates every SMB property on the response.
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createWithResponse(null, FILE_PERMISSION, null, null, null);
    assertEquals(201, response.getStatusCode());
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@Test
public void createDirectoryWithFilePermissionKey() {
    // A permission registered on the share up-front can be referenced by key at creation.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createWithResponse(smbProperties, null, null, null, null);
    assertEquals(201, response.getStatusCode());
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@Test
public void createDirectoryWithNtfsAttributes() {
    // NTFS attributes plus a permission key are accepted together at creation time.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> attributes =
        EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey)
        .setNtfsFileAttributes(attributes);
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createWithResponse(smbProperties, null, null, null, null);
    assertEquals(201, response.getStatusCode());
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void createChangeTime() {
    // The file change time supplied at creation must round-trip through getProperties.
    OffsetDateTime expectedChangeTime = testResourceNamer.now();
    FileSmbProperties props = new FileSmbProperties().setFileChangeTime(expectedChangeTime);
    primaryDirectoryClient.createWithResponse(props, null, null, null, null);
    OffsetDateTime actual =
        primaryDirectoryClient.getProperties().getSmbProperties().getFileChangeTime();
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(actual, expectedChangeTime));
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
    // Supplying both a permission key and a permission, or an oversized permission,
    // must be rejected client-side with IllegalArgumentException.
    FileSmbProperties smb = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    assertThrows(IllegalArgumentException.class,
        () -> primaryDirectoryClient.createWithResponse(smb, permission, null, null, null));
}
private static Stream<Arguments> permissionAndKeySupplier() {
    // Case 1: key and permission set together. Case 2: permission exceeding the 8 KiB limit.
    String oversizedPermission = new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB));
    return Stream.of(
        Arguments.of("filePermissionKey", FILE_PERMISSION),
        Arguments.of(null, oversizedPermission));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void createTrailingDot(boolean allowTrailingDot) {
    // When trailing dots are allowed the service preserves them; otherwise it strips them.
    ShareClient share = getShareClient(shareName, allowTrailingDot, null);
    ShareDirectoryClient root = share.getRootDirectoryClient();
    String baseName = generatePathName();
    String nameWithDot = baseName + ".";
    share.getDirectoryClient(nameWithDot).create();
    List<String> listedNames = new ArrayList<>();
    root.listFilesAndDirectories().forEach(item -> listedNames.add(item.getName()));
    assertEquals(1, listedNames.size());
    String expected = allowTrailingDot ? nameWithDot : baseName;
    assertEquals(expected, listedNames.get(0));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void createDirectoryOAuth() {
    // A backup-intent OAuth client can create directories; the body ETag matches the header.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(
        new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient =
        oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    Response<ShareDirectoryInfo> createResponse = dirClient.createWithResponse(null, null, null, null, null);
    assertEquals(shareName, dirClient.getShareName());
    assertEquals(dirName, dirClient.getDirectoryPath());
    assertEquals(createResponse.getValue().getETag(),
        createResponse.getHeaders().getValue(HttpHeaderName.ETAG));
}
@Test
public void createIfNotExistsDirectoryMin() {
    // The minimal overload succeeds and hands back a non-null info object.
    ShareDirectoryInfo info = primaryDirectoryClient.createIfNotExists();
    assertNotNull(info);
}
@Test
public void createIfNotExistsDirectory() {
    // With default options the first createIfNotExists yields 201 Created.
    int status = primaryDirectoryClient
        .createIfNotExistsWithResponse(new ShareDirectoryCreateOptions(), null, null)
        .getStatusCode();
    assertEquals(201, status);
}
@Test
public void createIfNotExistsDirectoryError() {
    // The share itself is missing, so even createIfNotExists fails with SHARE_NOT_FOUND.
    String missingShare = generateShareName();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> directoryBuilderHelper(missingShare, directoryPath).buildDirectoryClient().createIfNotExists());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
    // First call creates (201); the second is a quiet conflict (409), not an exception.
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions();
    ShareDirectoryClient client = shareClient.getDirectoryClient(generatePathName());
    Response<ShareDirectoryInfo> firstResponse = client.createIfNotExistsWithResponse(options, null, null);
    Response<ShareDirectoryInfo> secondResponse = client.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(firstResponse, 201);
    FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@Test
public void createIfNotExistsDirectoryWithMetadata() {
    // Metadata carried on the options object is accepted on first creation.
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setMetadata(testMetadata);
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createIfNotExistsDirectoryWithFilePermission() {
    // A permission on the options object results in a fully-populated SMB property block.
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION);
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb);
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@Test
public void createIfNotExistsDirectoryWithFilePermissionKey() {
    // A pre-registered permission key carried via SMB properties is honored.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb);
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@Test
public void createIfNotExistsDirectoryWithNtfsAttributes() {
    // NTFS attributes combined with a permission key survive createIfNotExists.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> attributes = EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey)
        .setNtfsFileAttributes(attributes);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb);
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createIfNotExistsDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
    // Key + permission together, or an oversized permission, is rejected before any request.
    FileSmbProperties smb = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions()
        .setSmbProperties(smb)
        .setFilePermission(permission);
    assertThrows(IllegalArgumentException.class,
        () -> primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null));
}
@Test
public void deleteDirectory() {
    // Deleting an existing directory answers 202 Accepted.
    primaryDirectoryClient.create();
    Response<Void> response = primaryDirectoryClient.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteTrailingDot() {
    // With trailing-dot support enabled, a "name." directory can be created and deleted.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient dotDirectory = shareClient.getDirectoryClient(generatePathName() + ".");
    dotDirectory.create();
    Response<Void> response = dotDirectory.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteDirectoryOAuth() {
    // A backup-intent OAuth client can delete; the response echoes a client request id.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(
        new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    Response<Void> response = dirClient.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
    assertNotNull(response.getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID));
}
@Test
public void deleteDirectoryError() {
    // Deleting a directory that was never created fails with 404 RESOURCE_NOT_FOUND.
    ShareStorageException e =
        assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.delete());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsDirectory() {
    // deleteIfExists on an existing directory behaves like delete: 202 Accepted.
    primaryDirectoryClient.create();
    Response<Boolean> response = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@Test
public void deleteIfExistsDirectoryMin() {
    // Minimal overload reports true when a deletion actually took place.
    primaryDirectoryClient.create();
    assertTrue(primaryDirectoryClient.deleteIfExists());
}
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
    // On a missing directory deleteIfExists reports false/404 instead of throwing.
    primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    Response<Boolean> response = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    assertFalse(response.getValue());
    FileShareTestHelper.assertResponseStatusCode(response, 404);
    assertFalse(primaryDirectoryClient.exists());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
    // First deletion succeeds (202/true); repeating it reports 404/false without throwing.
    primaryDirectoryClient.create();
    Response<Boolean> firstResponse = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    Response<Boolean> secondResponse = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    assertEquals(202, firstResponse.getStatusCode());
    assertEquals(404, secondResponse.getStatusCode());
    assertTrue(firstResponse.getValue());
    assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
    // getProperties on an existing directory returns 200, an ETag, and a full SMB block.
    primaryDirectoryClient.create();
    Response<ShareDirectoryProperties> response =
        primaryDirectoryClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
    assertNotNull(response.getValue().getETag());
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb);
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
    // Properties fetched for a "name." directory must match what creation reported.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    ShareDirectoryInfo created = directoryClient.createIfNotExists();
    Response<ShareDirectoryProperties> response = directoryClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
    assertEquals(created.getETag(), response.getValue().getETag());
    assertEquals(created.getLastModified(), response.getValue().getLastModified());
    FileSmbProperties expected = created.getSmbProperties();
    FileSmbProperties actual = response.getValue().getSmbProperties();
    assertEquals(expected.getFilePermissionKey(), actual.getFilePermissionKey());
    assertEquals(expected.getNtfsFileAttributes(), actual.getNtfsFileAttributes());
    assertEquals(expected.getFileLastWriteTime(), actual.getFileLastWriteTime());
    assertEquals(expected.getFileCreationTime(), actual.getFileCreationTime());
    assertEquals(expected.getFileChangeTime(), actual.getFileChangeTime());
    assertEquals(expected.getParentId(), actual.getParentId());
    assertEquals(expected.getFileId(), actual.getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
    // A backup-intent OAuth client reads back the same property values creation returned.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(
        new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    ShareDirectoryInfo createInfo = dirClient.create();
    ShareDirectoryProperties properties = dirClient.getProperties();
    assertEquals(createInfo.getETag(), properties.getETag());
    assertEquals(createInfo.getLastModified(), properties.getLastModified());
    FileSmbProperties expected = createInfo.getSmbProperties();
    FileSmbProperties actual = properties.getSmbProperties();
    assertEquals(expected.getFilePermissionKey(), actual.getFilePermissionKey());
    assertEquals(expected.getNtfsFileAttributes(), actual.getNtfsFileAttributes());
    assertEquals(expected.getFileLastWriteTime(), actual.getFileLastWriteTime());
    assertEquals(expected.getFileCreationTime(), actual.getFileCreationTime());
    assertEquals(expected.getFileChangeTime(), actual.getFileChangeTime());
    assertEquals(expected.getParentId(), actual.getParentId());
    assertEquals(expected.getFileId(), actual.getFileId());
}
@Test
public void getPropertiesError() {
    // getProperties on a never-created directory fails with 404 RESOURCE_NOT_FOUND.
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.getPropertiesWithResponse(null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void setPropertiesFilePermission() {
    // Setting only a permission still yields a fully-populated SMB block in the response.
    primaryDirectoryClient.create();
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.setPropertiesWithResponse(null, FILE_PERMISSION, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb);
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@Test
public void setPropertiesFilePermissionKey() {
    // Setting properties via a pre-registered permission key populates the SMB block.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.create();
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.setPropertiesWithResponse(smbProperties, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
    FileSmbProperties smb = response.getValue().getSmbProperties();
    assertNotNull(smb);
    assertNotNull(smb.getFilePermissionKey());
    assertNotNull(smb.getNtfsFileAttributes());
    assertNotNull(smb.getFileLastWriteTime());
    assertNotNull(smb.getFileCreationTime());
    assertNotNull(smb.getFileChangeTime());
    assertNotNull(smb.getParentId());
    assertNotNull(smb.getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void setHttpHeadersChangeTime() {
    // The change time supplied via setProperties must round-trip through getProperties.
    primaryDirectoryClient.create();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    OffsetDateTime changeTime = testResourceNamer.now();
    // NOTE(review): this smbProperties mutation is never passed to setProperties below —
    // presumably leftover setup; confirm whether it can be removed.
    smbProperties.setFileChangeTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.setProperties(new FileSmbProperties().setFileChangeTime(changeTime), null);
    // BUG FIX: the boolean result of compareDatesWithPrecision was previously discarded,
    // so the test could never fail; wrap it in assertTrue (matching createChangeTime).
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(
        primaryDirectoryClient.getProperties().getSmbProperties().getFileChangeTime(), changeTime));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setHttpHeadersTrailingDot() {
    // setProperties works against a trailing-dot directory when the share allows it.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.createIfNotExists();
    Response<ShareDirectoryInfo> response =
        directoryClient.setPropertiesWithResponse(new FileSmbProperties(), null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
    // A backup-intent OAuth client may call setProperties; the service answers 200.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(
        new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    Response<ShareDirectoryInfo> response =
        dirClient.setPropertiesWithResponse(new FileSmbProperties(), null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 200);
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void setPropertiesError(String filePermissionKey, String permission) {
    // Invalid permission/key combinations are rejected client-side before any request.
    FileSmbProperties smb = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.create();
    assertThrows(IllegalArgumentException.class,
        () -> primaryDirectoryClient.setPropertiesWithResponse(smb, permission, null, null));
}
@Test
public void setMetadata() {
    // setMetadata fully replaces the metadata supplied at creation time.
    primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties before = primaryDirectoryClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setResponse =
        primaryDirectoryClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties after = primaryDirectoryClient.getProperties();
    assertEquals(testMetadata, before.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setResponse, 200);
    assertEquals(updatedMetadata, after.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setMetadataTrailingDot() {
    // Metadata replacement works against a trailing-dot directory when the share allows it.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties before = directoryClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setResponse =
        directoryClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties after = directoryClient.getProperties();
    assertEquals(testMetadata, before.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setResponse, 200);
    assertEquals(updatedMetadata, after.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setMetadataOAuth() {
    // A backup-intent OAuth client can replace directory metadata.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(
        new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties before = dirClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setResponse =
        dirClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties after = dirClient.getProperties();
    assertEquals(testMetadata, before.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setResponse, 200);
    assertEquals(updatedMetadata, after.getMetadata());
}
@Test
public void setMetadataError() {
    // An empty metadata key is illegal and surfaces as 400 EMPTY_METADATA_KEY.
    primaryDirectoryClient.create();
    Map<String, String> badMetadata = Collections.singletonMap("", "value");
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.setMetadata(badMetadata));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
// NOTE(review): the @MethodSource value above appears truncated in this view — confirm the
// fully-qualified supplier name against the original source.
// Creates the supplied files and subdirectories, then verifies listing reports each entry
// with the correct directory flag and in the expected order.
public void listFilesAndDirectories(String[] expectedFiles, String[] expectedDirectories) {
primaryDirectoryClient.create();
for (String expectedFile : expectedFiles) {
primaryDirectoryClient.createFile(expectedFile, 2);
}
for (String expectedDirectory : expectedDirectories) {
primaryDirectoryClient.createSubdirectory(expectedDirectory);
}
// Partition the listing into files vs. directories using the isDirectory flag.
List<String> foundFiles = new ArrayList<>();
List<String> foundDirectories = new ArrayList<>();
for (ShareFileItem fileRef : primaryDirectoryClient.listFilesAndDirectories()) {
if (fileRef.isDirectory()) {
foundDirectories.add(fileRef.getName());
} else {
foundFiles.add(fileRef.getName());
}
}
assertArrayEquals(expectedFiles, foundFiles.toArray());
assertArrayEquals(expectedDirectories, foundDirectories.toArray());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
@ParameterizedTest
@MethodSource("listFilesAndDirectoriesArgsSupplier")
public void listFilesAndDirectoriesArgs(String extraPrefix, Integer maxResults, int numOfResults) {
    primaryDirectoryClient.create();
    String dirPrefix = generatePathName();
    // Two subdirectories, each with two nested files, plus one file directly here;
    // only the three direct children (dirPrefix0..dirPrefix2) should be listed.
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDir = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDir.create();
        for (int j = 0; j < 2; j++) {
            subDir.createFile(dirPrefix + (i * 2 + j + 3), 1024);
        }
    }
    primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
    List<String> expectedNames = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        expectedNames.add(dirPrefix + i);
    }
    Iterator<ShareFileItem> items = primaryDirectoryClient
        .listFilesAndDirectories(prefix + extraPrefix, maxResults, null, null).iterator();
    for (int i = 0; i < numOfResults; i++) {
        assertEquals(expectedNames.get(i), items.next().getName());
    }
    assertFalse(items.hasNext());
}
private static Stream<Arguments> listFilesAndDirectoriesArgsSupplier() {
    // (extraPrefix, maxResults per page, expected total results).
    // A page size of 1 still yields all 3 results via continuation; a non-matching
    // prefix yields none.
    return Stream.of(
        Arguments.of("", null, 3),
        Arguments.of("", 1, 3),
        Arguments.of("noOp", 3, 0));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
@ParameterizedTest
@CsvSource(value = {"false,false,false,false", "true,false,false,false", "false,true,false,false",
"false,false,true,false", "false,false,false,true", "true,true,true,true"})
public void listFilesAndDirectoriesExtendedInfoArgs(boolean timestamps, boolean etag, boolean attributes,
boolean permissionKey) {
    primaryDirectoryClient.create();
    String dirPrefix = generatePathName();
    // Two subdirectories with two nested files each, plus one file directly here;
    // only the three direct children are expected in the listing.
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDir = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDir.create();
        for (int j = 0; j < 2; j++) {
            subDir.createFile(dirPrefix + (i * 2 + j + 3), 1024);
        }
    }
    primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
    List<String> expectedNames = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        expectedNames.add(dirPrefix + i);
    }
    // Extended info must be enabled for the individual include flags to take effect.
    ShareListFilesAndDirectoriesOptions options = new ShareListFilesAndDirectoriesOptions()
        .setPrefix(prefix)
        .setIncludeExtendedInfo(true)
        .setIncludeTimestamps(timestamps)
        .setIncludeETag(etag)
        .setIncludeAttributes(attributes)
        .setIncludePermissionKey(permissionKey);
    List<ShareFileItem> listed = primaryDirectoryClient.listFilesAndDirectories(options, null, null)
        .stream().collect(Collectors.toList());
    for (int i = 0; i < expectedNames.size(); i++) {
        assertEquals(expectedNames.get(i), listed.get(i).getName());
    }
}
    // Lists with all extended-info flags enabled and verifies each extended field
    // (id, attributes, permission key, timestamps, ETag) is populated and non-blank
    // for both a directory item and a file item.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
    public void listFilesAndDirectoriesExtendedInfoResults() {
        ShareDirectoryClient parentDir = primaryDirectoryClient;
        parentDir.create();
        ShareFileClient file = parentDir.createFile(generatePathName(), 1024);
        ShareDirectoryClient dir = parentDir.createSubdirectory(generatePathName());
        List<ShareFileItem> listResults = parentDir.listFilesAndDirectories(
            new ShareListFilesAndDirectoriesOptions()
                .setIncludeExtendedInfo(true)
                .setIncludeTimestamps(true)
                .setIncludePermissionKey(true)
                .setIncludeETag(true)
                .setIncludeAttributes(true),
            null, null)
            .stream().collect(Collectors.toList());
        // Listing order of the two items is not assumed; pick them apart by type.
        ShareFileItem dirListItem;
        ShareFileItem fileListItem;
        if (listResults.get(0).isDirectory()) {
            dirListItem = listResults.get(0);
            fileListItem = listResults.get(1);
        } else {
            dirListItem = listResults.get(1);
            fileListItem = listResults.get(0);
        }
        assertEquals(dirListItem.getName(), new File(dir.getDirectoryPath()).getName());
        assertTrue(dirListItem.isDirectory());
        assertNotNull(dirListItem.getId());
        assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getId()));
        assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), dirListItem.getFileAttributes());
        assertNotNull(dirListItem.getPermissionKey());
        assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getPermissionKey()));
        assertNotNull(dirListItem.getProperties().getCreatedOn());
        assertNotNull(dirListItem.getProperties().getLastAccessedOn());
        assertNotNull(dirListItem.getProperties().getLastWrittenOn());
        assertNotNull(dirListItem.getProperties().getChangedOn());
        assertNotNull(dirListItem.getProperties().getLastModified());
        assertNotNull(dirListItem.getProperties().getETag());
        assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getProperties().getETag()));
        assertEquals(fileListItem.getName(), new File(file.getFilePath()).getName());
        assertFalse(fileListItem.isDirectory());
        assertNotNull(fileListItem.getId());
        assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getId()));
        assertEquals(EnumSet.of(NtfsFileAttributes.ARCHIVE), fileListItem.getFileAttributes());
        assertNotNull(fileListItem.getPermissionKey());
        assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getPermissionKey()));
        assertNotNull(fileListItem.getProperties().getCreatedOn());
        assertNotNull(fileListItem.getProperties().getLastAccessedOn());
        assertNotNull(fileListItem.getProperties().getLastWrittenOn());
        assertNotNull(fileListItem.getProperties().getChangedOn());
        assertNotNull(fileListItem.getProperties().getLastModified());
        assertNotNull(fileListItem.getProperties().getETag());
        assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getProperties().getETag()));
    }
    // Verifies names containing U+FFFE (which the service returns percent-encoded)
    // round-trip correctly through listing; directories are listed before files.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
    public void listFilesAndDirectoriesEncoded() {
        String specialCharDirectoryName = "directory\uFFFE";
        String specialCharFileName = "file\uFFFE";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
        primaryDirectoryClient.createFile(specialCharFileName, 1024);
        List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
            .collect(Collectors.toList());
        assertEquals(2, shareFileItems.size());
        assertTrue(shareFileItems.get(0).isDirectory());
        assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
        assertFalse(shareFileItems.get(1).isDirectory());
        assertEquals(specialCharFileName, shareFileItems.get(1).getName());
    }
    // Pages with size 1 over names containing U+FFFE so the continuation token itself
    // carries an encoded name; both names must still decode correctly.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
    public void listFilesAndDirectoriesEncodedContinuationToken() {
        String specialCharFileName0 = "file0\uFFFE";
        String specialCharFileName1 = "file1\uFFFE";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createFile(specialCharFileName0, 1024);
        primaryDirectoryClient.createFile(specialCharFileName1, 1024);
        List<ShareFileItem> shareFileItems = new ArrayList<>();
        for (PagedResponse<ShareFileItem> page : primaryDirectoryClient.listFilesAndDirectories().iterableByPage(1)) {
            shareFileItems.addAll(page.getValue());
        }
        assertEquals(specialCharFileName0, shareFileItems.get(0).getName());
        assertEquals(specialCharFileName1, shareFileItems.get(1).getName());
    }
    // Verifies a directory name containing U+FFFE is returned correctly by a plain listing.
    // NOTE(review): despite the test name, no prefix option is actually applied here — confirm
    // whether a setPrefix(specialCharDirectoryName) call was intended.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
    public void listFilesAndDirectoriesEncodedPrefix() {
        String specialCharDirectoryName = "directory\uFFFE";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
        List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
            .collect(Collectors.toList());
        assertEquals(1, shareFileItems.size());
        assertTrue(shareFileItems.get(0).isDirectory());
        assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
    }
    // Lists files and directories through an OAuth (token-intent BACKUP) client and
    // verifies every listed entry was one of the created files/directories.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void testListFilesAndDirectoriesOAuth() {
        ShareDirectoryClient dirClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP))
            .getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        List<String> fileNames = new ArrayList<>();
        List<String> dirNames = new ArrayList<>();
        for (int i = 0; i < 11; i++) {
            fileNames.add(generatePathName());
        }
        for (int i = 0; i < 5; i++) {
            dirNames.add(generatePathName());
        }
        for (String file : fileNames) {
            dirClient.createFile(file, Constants.KB);
        }
        for (String directory : dirNames) {
            dirClient.createSubdirectory(directory);
        }
        // Partition the listing back into files vs directories and check containment.
        List<String> foundFiles = new ArrayList<>();
        List<String> foundDirectories = new ArrayList<>();
        for (ShareFileItem fileRef : dirClient.listFilesAndDirectories()) {
            if (fileRef.isDirectory()) {
                foundDirectories.add(fileRef.getName());
            } else {
                foundFiles.add(fileRef.getName());
            }
        }
        assertTrue(fileNames.containsAll(foundFiles));
        assertTrue(dirNames.containsAll(foundDirectories));
    }
    // Verifies iterableByPage(1) caps every returned page at a single item.
    @Test
    public void listMaxResultsByPage() {
        primaryDirectoryClient.create();
        String dirPrefix = generatePathName();
        for (int i = 0; i < 2; i++) {
            ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
            subDirClient.create();
            for (int j = 0; j < 2; j++) {
                int num = i * 2 + j + 3;
                subDirClient.createFile(dirPrefix + num, 1024);
            }
        }
        for (PagedResponse<ShareFileItem> page
            : primaryDirectoryClient.listFilesAndDirectories(prefix, null, null, null).iterableByPage(1)) {
            assertEquals(1, page.getValue().size());
        }
    }
    // A freshly-created directory has no open SMB handles, whatever the
    // maxResults/recursive arguments are.
    @ParameterizedTest
    @MethodSource("listHandlesSupplier")
    public void listHandles(Integer maxResults, boolean recursive) {
        primaryDirectoryClient.create();
        List<HandleItem> handles = primaryDirectoryClient.listHandles(maxResults, recursive, null, null).stream()
            .collect(Collectors.toList());
        assertEquals(0, handles.size());
    }
    // Supplies (maxResults, recursive) argument pairs for listHandles.
    private static Stream<Arguments> listHandlesSupplier() {
        return Stream.of(
            Arguments.of(2, true),
            Arguments.of(null, false));
    }
    // listHandles works on a directory name ending in '.' when trailing-dot support is on.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    public void listHandlesTrailingDot() {
        shareClient = getShareClient(shareName, true, null);
        String directoryName = generatePathName() + ".";
        ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(directoryName);
        directoryClient.create();
        List<HandleItem> handles = directoryClient.listHandles(null, false, null, null).stream()
            .collect(Collectors.toList());
        assertEquals(0, handles.size());
    }
    // listHandles works through an OAuth (token-intent BACKUP) client.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void listHandlesOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        List<HandleItem> handles = dirClient.listHandles(2, true, null, null).stream().collect(Collectors.toList());
        assertEquals(0, handles.size());
    }
    // Listing handles on a directory that was never created fails with 404 RESOURCE_NOT_FOUND.
    @Test
    public void listHandlesError() {
        Exception e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.listHandles(null, true, null, null).iterator().hasNext());
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    // Force-closing handle id "1" on a directory with no open handles is a no-op:
    // zero closed and zero failed handles reported.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
    public void forceCloseHandleMin() {
        primaryDirectoryClient.create();
        CloseHandlesInfo handlesClosedInfo = primaryDirectoryClient.forceCloseHandle("1");
        assertEquals(0, handlesClosedInfo.getClosedHandles());
        assertEquals(0, handlesClosedInfo.getFailedHandles());
    }
    // A non-numeric handle id is rejected by the service with a ShareStorageException.
    @Test
    public void forceCloseHandleInvalidHandleId() {
        primaryDirectoryClient.create();
        assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.forceCloseHandle("invalidHandleId"));
    }
    // forceCloseHandle works through an OAuth (token-intent BACKUP) client.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void forceCloseHandleOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        CloseHandlesInfo handlesClosedInfo = dirClient.forceCloseHandle("1");
        assertEquals(0, handlesClosedInfo.getClosedHandles());
        assertEquals(0, handlesClosedInfo.getFailedHandles());
    }
    // forceCloseAllHandles on a directory with no open handles reports zero closed/failed.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
    public void forceCloseAllHandlesMin() {
        primaryDirectoryClient.create();
        CloseHandlesInfo handlesClosedInfo = primaryDirectoryClient.forceCloseAllHandles(false, null, null);
        assertEquals(0, handlesClosedInfo.getClosedHandles());
        assertEquals(0, handlesClosedInfo.getFailedHandles());
    }
    // forceCloseAllHandles works on a directory name ending in '.' with trailing-dot support.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    public void forceCloseAllHandlesTrailingDot() {
        shareClient = getShareClient(shareName, true, null);
        ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
        directoryClient.create();
        CloseHandlesInfo handlesClosedInfo = directoryClient.forceCloseAllHandles(false, null, null);
        assertEquals(0, handlesClosedInfo.getClosedHandles());
        assertEquals(0, handlesClosedInfo.getFailedHandles());
    }
    // Minimal rename smoke test: renaming an existing directory must not throw.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameMin() {
        primaryDirectoryClient.create();
        assertDoesNotThrow(() -> primaryDirectoryClient.rename(generatePathName()));
    }
    // After renameWithResponse the returned client points at the new directory and
    // the original path no longer resolves.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameWithResponse() {
        primaryDirectoryClient.create();
        Response<ShareDirectoryClient> resp = primaryDirectoryClient.renameWithResponse(
            new ShareFileRenameOptions(generatePathName()), null, null);
        ShareDirectoryClient renamedClient = resp.getValue();
        assertDoesNotThrow(renamedClient::getProperties);
        assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.getProperties());
    }
    // Renames the directory into a different parent directory and verifies the source
    // is gone and the destination exists at the expected path.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameDifferentDirectory() {
        primaryDirectoryClient.create();
        ShareDirectoryClient destinationClient = shareClient.getDirectoryClient(generatePathName());
        destinationClient.create();
        String destinationPath = destinationClient.getFileClient(generatePathName()).getFilePath();
        ShareDirectoryClient resultClient = primaryDirectoryClient.rename(destinationPath);
        assertTrue(resultClient.exists());
        assertFalse(primaryDirectoryClient.exists());
        assertEquals(destinationPath, resultClient.getDirectoryPath());
    }
    // Renaming onto an existing file succeeds only when setReplaceIfExists(true);
    // otherwise the service rejects it with a ShareStorageException.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void renameReplaceIfExists(boolean replaceIfExists) {
        primaryDirectoryClient.create();
        ShareFileClient destination = shareClient.getFileClient(generatePathName());
        destination.create(512L);
        boolean exception = false;
        try {
            primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath())
                .setReplaceIfExists(replaceIfExists), null, null);
        } catch (ShareStorageException ignored) {
            exception = true;
        }
        // Exactly one of {replaceIfExists, exception} should hold.
        assertEquals(replaceIfExists, !exception);
    }
    // Renaming over a READ_ONLY destination file succeeds only with setIgnoreReadOnly(true),
    // even when replaceIfExists is set.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void renameIgnoreReadOnly(boolean ignoreReadOnly) {
        primaryDirectoryClient.create();
        FileSmbProperties props = new FileSmbProperties().setNtfsFileAttributes(
            EnumSet.of(NtfsFileAttributes.READ_ONLY));
        ShareFileClient destinationFile = shareClient.getFileClient(generatePathName());
        destinationFile.createWithResponse(512L, null, props, null, null, null, null, null);
        ShareFileRenameOptions options = new ShareFileRenameOptions(destinationFile.getFilePath())
            .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true);
        boolean exception = false;
        try {
            primaryDirectoryClient.renameWithResponse(options, null, null);
        } catch (ShareStorageException ignored) {
            exception = true;
        }
        assertEquals(!ignoreReadOnly, exception);
    }
    // Passing an SDDL file permission on rename results in a permission key being
    // stored on the destination directory.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameFilePermission() {
        primaryDirectoryClient.create();
        String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
        ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
            .setFilePermission(filePermission);
        ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
        assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey());
    }
    // Supplying both a file permission and a permission key on rename is invalid and
    // must be rejected by the service.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameFilePermissionAndKeySet() {
        primaryDirectoryClient.create();
        String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
        ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
            .setFilePermission(filePermission)
            .setSmbProperties(new FileSmbProperties()
                .setFilePermissionKey("filePermissionkey"));
        assertThrows(ShareStorageException.class, () ->
            primaryDirectoryClient.renameWithResponse(options, null, null).getValue());
    }
    // SMB properties (permission key, attributes, timestamps) supplied on rename are
    // reflected on the destination directory.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameFileSmbProperties() {
        primaryDirectoryClient.create();
        String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
        String permissionKey = shareClient.createPermission(filePermission);
        FileSmbProperties smbProperties = new FileSmbProperties()
            .setFilePermissionKey(permissionKey)
            .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.DIRECTORY))
            .setFileCreationTime(testResourceNamer.now().minusDays(5))
            .setFileLastWriteTime(testResourceNamer.now().minusYears(2))
            .setFileChangeTime(testResourceNamer.now());
        ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName()).setSmbProperties(smbProperties);
        ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
        FileSmbProperties destSmbProperties = destClient.getProperties().getSmbProperties();
        assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), destSmbProperties.getNtfsFileAttributes());
        assertNotNull(destSmbProperties.getFileCreationTime());
        assertNotNull(destSmbProperties.getFileLastWriteTime());
        FileShareTestHelper.compareDatesWithPrecision(destSmbProperties.getFileChangeTime(), testResourceNamer.now());
    }
    // Metadata supplied on rename replaces the destination directory's metadata.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameMetadata() {
        primaryDirectoryClient.create();
        String key = "update";
        String value = "value";
        Map<String, String> updatedMetadata = Collections.singletonMap(key, value);
        ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
            .setMetadata(updatedMetadata);
        ShareDirectoryClient renamedClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
        ShareDirectoryProperties properties = renamedClient.getProperties();
        assertNotNull(properties.getMetadata().get(key));
        assertEquals(value, renamedClient.getProperties().getMetadata().get(key));
    }
    // Rename works through an OAuth (token-intent BACKUP) client; the source path
    // stops resolving afterwards.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String dirRename = generatePathName();
        ShareFileRenameOptions options = new ShareFileRenameOptions(dirRename);
        ShareDirectoryClient renamedClient = dirClient.renameWithResponse(options, null, null).getValue();
        assertDoesNotThrow(renamedClient::getProperties);
        assertEquals(dirRename, renamedClient.getDirectoryPath());
        assertThrows(ShareStorageException.class, dirClient::getProperties);
    }
    // Renaming a directory that was never created fails with a ShareStorageException.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameError() {
        primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
        assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.rename(generatePathName()));
    }
    // Rename over a leased destination file succeeds when the correct lease id is
    // supplied as a destination request condition.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameDestAC() {
        primaryDirectoryClient.create();
        String pathName = generatePathName();
        ShareFileClient destFile = shareClient.getFileClient(pathName);
        destFile.create(512);
        String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID);
        ShareRequestConditions src = new ShareRequestConditions().setLeaseId(leaseID);
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.renameWithResponse(
            new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null,
            null), 200);
    }
    // Rename over a leased destination fails when a non-matching (garbage) lease id
    // is supplied as the destination request condition.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    public void renameDestACFail() {
        primaryDirectoryClient.create();
        String pathName = generatePathName();
        ShareFileClient destFile = shareClient.getFileClient(pathName);
        destFile.create(512);
        setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID);
        ShareRequestConditions src = new ShareRequestConditions().setLeaseId(GARBAGE_LEASE_ID);
        assertThrows(RuntimeException.class,
            () -> primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(pathName)
                .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null));
    }
    // Rename works when authenticated with a share-level SAS that carries
    // read/write/create/delete permissions.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12")
    public void testRenameSASToken() {
        ShareFileSasPermission permissions = new ShareFileSasPermission()
            .setReadPermission(true)
            .setWritePermission(true)
            .setCreatePermission(true)
            .setDeletePermission(true);
        OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
        ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
        String sas = shareClient.generateSas(sasValues);
        ShareDirectoryClient client = getDirectoryClient(sas, primaryDirectoryClient.getDirectoryUrl());
        primaryDirectoryClient.create();
        String directoryName = generatePathName();
        ShareDirectoryClient destClient = client.rename(directoryName);
        assertNotNull(destClient);
        destClient.getProperties();
        assertEquals(directoryName, destClient.getDirectoryPath());
    }
    // Rename works on names ending in '.' when trailing-dot support is enabled for
    // both source and destination.
    @Test
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    public void renameTrailingDot() {
        shareClient = getShareClient(shareName, true, true);
        String directoryName = generatePathName() + ".";
        ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(directoryName);
        directoryClient.create();
        assertDoesNotThrow(() -> directoryClient.rename(directoryName));
    }
    // Creating a subdirectory returns HTTP 201.
    @Test
    public void createSubDirectory() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryWithResponse(
            "testCreateSubDirectory", null, null, null, null, null), 201);
    }
    // A subdirectory name containing '/' implies a missing intermediate parent,
    // which the service reports as 404 PARENT_NOT_FOUND.
    @Test
    public void createSubDirectoryInvalidName() {
        primaryDirectoryClient.create();
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.createSubdirectory("test/subdirectory"));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.PARENT_NOT_FOUND);
    }
    // Creating a subdirectory with metadata returns HTTP 201.
    @Test
    public void createSubDirectoryMetadata() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryWithResponse(
            "testCreateSubDirectory", null, null, testMetadata, null, null), 201);
    }
    // An empty metadata key is rejected with 400 EMPTY_METADATA_KEY.
    @Test
    public void createSubDirectoryMetadataError() {
        primaryDirectoryClient.create();
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.createSubdirectoryWithResponse("testsubdirectory", null, null,
                Collections.singletonMap("", "value"), null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
    }
    // Creating a subdirectory with an SDDL file permission returns HTTP 201.
    @Test
    public void createSubDirectoryFilePermission() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(
            primaryDirectoryClient.createSubdirectoryWithResponse("testCreateSubDirectory",
                null, FILE_PERMISSION, null, null, null), 201);
    }
    // Creating a subdirectory with a pre-registered permission key (via SMB properties)
    // returns HTTP 201.
    @Test
    public void createSubDirectoryFilePermissionKey() {
        primaryDirectoryClient.create();
        String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
        smbProperties.setFileCreationTime(testResourceNamer.now())
            .setFileLastWriteTime(testResourceNamer.now())
            .setFilePermissionKey(filePermissionKey);
        FileShareTestHelper.assertResponseStatusCode(
            primaryDirectoryClient.createSubdirectoryWithResponse("testCreateSubDirectory", smbProperties, null, null,
                null, null), 201);
    }
    // createSubdirectoryIfNotExists on a new name returns HTTP 201.
    @Test
    public void createIfNotExistsSubDirectory() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            "testCreateSubDirectory", new ShareDirectoryCreateOptions(), null, null), 201);
    }
    // The first createSubdirectoryIfNotExists returns 201; repeating it for the same
    // name returns 409 (already exists) instead of throwing.
    @Test
    public void createIfNotExistsSubDirectoryAlreadyExists() {
        String subdirectoryName = generatePathName();
        primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
        primaryDirectoryClient.create();
        int initialResponseCode = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            subdirectoryName,
            new ShareDirectoryCreateOptions(),
            null, null)
            .getStatusCode();
        int secondResponseCode = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            subdirectoryName,
            new ShareDirectoryCreateOptions(),
            null, null)
            .getStatusCode();
        assertEquals(201, initialResponseCode);
        assertEquals(409, secondResponseCode);
    }
    // A name containing '/' still fails with 404 PARENT_NOT_FOUND in the if-not-exists variant.
    @Test
    public void createIfNotExistsSubDirectoryInvalidName() {
        primaryDirectoryClient.create();
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.createSubdirectoryIfNotExists("test/subdirectory"));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.PARENT_NOT_FOUND);
    }
    // createSubdirectoryIfNotExists with metadata returns HTTP 201.
    @Test
    public void createIfNotExistsSubDirectoryMetadata() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            "testCreateSubDirectory", new ShareDirectoryCreateOptions().setMetadata(testMetadata), null, null), 201);
    }
    // An empty metadata key is rejected with 400 EMPTY_METADATA_KEY in the if-not-exists variant.
    @Test
    public void createIfNotExistsSubDirectoryMetadataError() {
        primaryDirectoryClient.create();
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
                "testsubdirectory",
                new ShareDirectoryCreateOptions()
                    .setMetadata(Collections.singletonMap("", "value")),
                null,
                null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
    }
    // createSubdirectoryIfNotExists with an SDDL file permission returns HTTP 201.
    @Test
    public void createIfNotExistsSubDirectoryFilePermission() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            "testCreateSubDirectory", new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION), null, null),
            201);
    }
    // createSubdirectoryIfNotExists with a pre-registered permission key (via SMB
    // properties) returns HTTP 201.
    @Test
    public void testCreateIfNotExistsSubDirectoryFilePermissionKey() {
        primaryDirectoryClient.create();
        String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
        smbProperties.setFileCreationTime(testResourceNamer.now())
            .setFileLastWriteTime(testResourceNamer.now())
            .setFilePermissionKey(filePermissionKey);
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            "testCreateSubDirectory", new ShareDirectoryCreateOptions().setSmbProperties(smbProperties), null, null),
            201);
    }
    // Deleting an existing subdirectory returns HTTP 202 (accepted).
    @Test
    public void testDeleteSubDirectory() {
        String subDirectoryName = "testSubCreateDirectory";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createSubdirectory(subDirectoryName);
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteSubdirectoryWithResponse(
            subDirectoryName, null, null), 202);
    }
    // Deleting a subdirectory that does not exist fails with 404 RESOURCE_NOT_FOUND.
    @Test
    public void deleteSubDirectoryError() {
        primaryDirectoryClient.create();
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.deleteSubdirectory("testsubdirectory"));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    // deleteSubdirectoryIfExists on an existing subdirectory returns HTTP 202.
    @Test
    public void deleteIfExistsSubDirectory() {
        String subDirectoryName = "testSubCreateDirectory";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createSubdirectory(subDirectoryName);
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient
            .deleteSubdirectoryIfExistsWithResponse(subDirectoryName, null, null), 202);
    }
    // The simple deleteSubdirectoryIfExists overload returns true when the target existed.
    @Test
    public void deleteIfExistsSubDirectoryMin() {
        String subDirectoryName = "testSubCreateDirectory";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createSubdirectory(subDirectoryName);
        assertTrue(primaryDirectoryClient.deleteSubdirectoryIfExists(subDirectoryName));
    }
    // deleteSubdirectoryIfExists on a missing subdirectory reports 404 with value=false
    // instead of throwing.
    @Test
    public void deleteIfExistsSubDirectoryThatDoesNotExist() {
        primaryDirectoryClient.create();
        Response<Boolean> response = primaryDirectoryClient.deleteSubdirectoryIfExistsWithResponse("testsubdirectory",
            null, null);
        assertEquals(404, response.getStatusCode());
        assertFalse(response.getValue());
    }
    // Creating a file in the directory returns HTTP 201.
    @Test
    public void createFile() {
        primaryDirectoryClient.create();
        FileShareTestHelper.assertResponseStatusCode(
            primaryDirectoryClient.createFileWithResponse("testCreateFile", 1024, null, null, null, null, null, null),
            201);
    }
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, null, null, null, null, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, statusCode, errMsg);
}
    // Creating a file with every optional argument supplied (headers, SMB properties,
    // permission, metadata) returns HTTP 201.
    @Test
    public void createFileMaxOverload() {
        primaryDirectoryClient.create();
        ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
        smbProperties.setFileCreationTime(testResourceNamer.now())
            .setFileLastWriteTime(testResourceNamer.now());
        FileShareTestHelper.assertResponseStatusCode(
            primaryDirectoryClient.createFileWithResponse("testCreateFile", 1024, httpHeaders, smbProperties,
                FILE_PERMISSION, testMetadata, null, null), 201);
    }
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null, metadata,
null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, errMsg);
}
    // Deleting an existing file returns HTTP 202 (accepted).
    @Test
    public void deleteFile() {
        String fileName = "testCreateFile";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createFile(fileName, 1024);
        FileShareTestHelper.assertResponseStatusCode(
            primaryDirectoryClient.deleteFileWithResponse(fileName, null, null), 202);
    }
    // Deleting a file that does not exist fails with 404 RESOURCE_NOT_FOUND.
    @Test
    public void deleteFileError() {
        primaryDirectoryClient.create();
        ShareStorageException e = assertThrows(ShareStorageException.class,
            () -> primaryDirectoryClient.deleteFileWithResponse("testfile", null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }
    // The simple deleteFileIfExists overload returns true when the file existed.
    @Test
    public void deleteIfExistsFileMin() {
        String fileName = "testCreateFile";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createFile(fileName, 1024);
        assertTrue(primaryDirectoryClient.deleteFileIfExists(fileName));
    }
    // deleteFileIfExistsWithResponse on an existing file returns HTTP 202.
    @Test
    public void deleteIfExistsFile() {
        String fileName = "testCreateFile";
        primaryDirectoryClient.create();
        primaryDirectoryClient.createFile(fileName, 1024);
        FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteFileIfExistsWithResponse(fileName,
            null, null), 202);
    }
    // deleteFileIfExists on a missing file reports 404 with value=false instead of throwing.
    @Test
    public void deleteIfExistsFileThatDoesNotExist() {
        primaryDirectoryClient.create();
        Response<Boolean> response = primaryDirectoryClient.deleteFileIfExistsWithResponse("testfile", null, null);
        assertEquals(404, response.getStatusCode());
        assertFalse(response.getValue());
    }
    // A client built with a snapshot timestamp exposes it via getShareSnapshotId().
    @Test
    public void getSnapshotId() {
        String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
        ShareDirectoryClient shareSnapshotClient = directoryBuilderHelper(shareName, directoryPath).snapshot(snapshot)
            .buildDirectoryClient();
        assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
    }
    // getShareName() reflects the share the client was built against.
    @Test
    public void getShareName() {
        assertEquals(shareName, primaryDirectoryClient.getShareName());
    }
    // getDirectoryPath() reflects the path the client was built against.
    @Test
    public void getDirectoryPath() {
        assertEquals(directoryPath, primaryDirectoryClient.getDirectoryPath());
    }
@Test
public void testPerCallPolicy() {
primaryDirectoryClient.create();
ShareDirectoryClient directoryClient = directoryBuilderHelper(primaryDirectoryClient.getShareName(),
primaryDirectoryClient.getDirectoryPath())
.addPolicy(getPerCallVersionPolicy()).buildDirectoryClient();
Response<ShareDirectoryProperties> response = directoryClient.getPropertiesWithResponse(null, null);
assertDoesNotThrow(() -> response.getHeaders().getValue("x-ms-version").equals("2017-11-09"));
}
    // The share root is addressable as either "" or "/"; both must resolve and expose
    // subdirectories created at the root.
    @ParameterizedTest
    @ValueSource(strings = {"", "/"})
    public void rootDirectorySupport(String rootDirPath) {
        String dir1Name = "dir1";
        String dir2Name = "dir2";
        shareClient.createDirectory(dir1Name).createSubdirectory(dir2Name);
        ShareDirectoryClient rootDirectory = shareClient.getDirectoryClient(rootDirPath);
        assertTrue(rootDirectory.exists());
        assertTrue(rootDirectory.getSubdirectoryClient(dir1Name).exists());
    }
@Test
public void createShareWithSmallTimeoutsFailForServiceClient() {
int maxRetries = 5;
long retryDelayMillis = 1000;
for (int i = 0; i < maxRetries; i++) {
try {
HttpClientOptions clientOptions = new HttpClientOptions()
.setApplicationId("client-options-id")
.setResponseTimeout(Duration.ofNanos(1))
.setReadTimeout(Duration.ofNanos(1))
.setWriteTimeout(Duration.ofNanos(1))
.setConnectTimeout(Duration.ofNanos(1));
ShareServiceClientBuilder clientBuilder = new ShareServiceClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.retryOptions(new RequestRetryOptions(null, 1, (Integer) null, null, null, null))
.clientOptions(clientOptions);
ShareServiceClient serviceClient = clientBuilder.buildClient();
assertThrows(RuntimeException.class, () -> serviceClient.createShareWithResponse(generateShareName(),
null, Duration.ofSeconds(10), null));
return;
} catch (Exception e) {
try {
Thread.sleep(retryDelayMillis);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}
}
@Test
public void defaultAudience() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(null) /* should default to "https:
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
@Test
public void storageAccountAudience() {
    // An OAuth client scoped to this storage account's own audience should authenticate successfully.
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
    dirClient.create();
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP)
            .audience(ShareAudience.createShareServiceAccountAudience(primaryDirectoryClient.getAccountName())));
    ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    assertTrue(aadDirClient.exists());
}
@Test
public void audienceError() {
    // A token issued for the wrong audience must be rejected with AUTHENTICATION_FAILED.
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
    dirClient.create();
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP)
            .audience(ShareAudience.createShareServiceAccountAudience("badAudience")));
    ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    ShareStorageException e = assertThrows(ShareStorageException.class, aadDirClient::exists);
    assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
} | class DirectoryApiTests extends FileShareTestBase {
// Clients and fixtures shared by every test; re-initialized in setup() before each test.
private ShareDirectoryClient primaryDirectoryClient;
private ShareClient shareClient;
private String directoryPath;
private String shareName;
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
// Full SDDL security descriptor used wherever a test needs an explicit file permission.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
    // Fresh share and directory names per test; only the share is actually created here.
    shareName = generateShareName();
    directoryPath = generatePathName();
    shareClient = shareBuilderHelper(shareName).buildClient();
    shareClient.create();
    // The directory client is built but NOT created; individual tests create it as needed.
    primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath).buildDirectoryClient();
    testMetadata = Collections.singletonMap("testmetadata", "value");
    smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getDirectoryUrl() {
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
String expectURL = String.format("https:
directoryPath);
String directoryURL = primaryDirectoryClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
}
@Test
public void getShareSnapshotUrl() {
String accountName = StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()).getAccountName();
String expectURL = String.format("https:
directoryPath);
ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot();
expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot();
ShareDirectoryClient newDirClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot())
.buildClient().getDirectoryClient(directoryPath);
String directoryURL = newDirClient.getDirectoryUrl();
assertEquals(expectURL, directoryURL);
String snapshotEndpoint = String.format("https:
shareName, directoryPath, shareSnapshotInfo.getSnapshot());
ShareDirectoryClient client = getDirectoryClient(StorageSharedKeyCredential
.fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint);
assertEquals(client.getDirectoryUrl(), snapshotEndpoint);
}
@Test
public void getSubDirectoryClient() {
    // The sub-directory accessor must hand back a concrete ShareDirectoryClient.
    ShareDirectoryClient child = primaryDirectoryClient.getSubdirectoryClient("testSubDirectory");
    assertInstanceOf(ShareDirectoryClient.class, child);
}
@Test
public void getFileClient() {
    // The file accessor must hand back a concrete ShareFileClient.
    ShareFileClient file = primaryDirectoryClient.getFileClient("testFile");
    assertInstanceOf(ShareFileClient.class, file);
}
// File names with percent sequences and non-ASCII characters that must not be re-encoded.
private static Stream<Arguments> getNonEncodedFileNameSupplier() {
    return Stream.of("test%test", "%Россия 한국 中国!", "%E6%96%91%E9%BB%9E", "斑點")
        .map(Arguments::of);
}
// BUG FIX: removed stray @ParameterizedTest/@MethodSource("getNonEncodedFileNameSupplier")
// annotations that were attached to this zero-argument method; combining them with @Test on a
// method that takes no parameters is invalid under JUnit 5 and fails at discovery time.
@Test
public void exists() {
    // A created directory must report itself as existing.
    primaryDirectoryClient.create();
    assertTrue(primaryDirectoryClient.exists());
}
@Test
public void doesNotExist() {
    // Never created, so the service should report it absent.
    boolean present = primaryDirectoryClient.exists();
    assertFalse(present);
}
@Test
public void existsError() {
    // A bogus SAS token must surface a 403 AUTHENTICATION_FAILED error.
    primaryDirectoryClient = directoryBuilderHelper(shareName, directoryPath)
        .sasToken("sig=dummyToken").buildDirectoryClient();
    // BUG FIX: exists() was previously exercised twice via two back-to-back assertThrows calls,
    // with the first result discarded; a single call is sufficient.
    ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.exists());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 403, ShareErrorCode.AUTHENTICATION_FAILED);
}
@Test
public void createDirectory() {
    // Bare create with no SMB properties, permission, or metadata returns 201 Created.
    Response<ShareDirectoryInfo> response =
        primaryDirectoryClient.createWithResponse(null, null, null, null, null);
    assertEquals(201, response.getStatusCode());
}
@Test
public void createDirectoryError() {
    // Creating a directory inside a share that was never created must fail with SHARE_NOT_FOUND.
    String missingShareName = generateShareName();
    ShareDirectoryClient client = directoryBuilderHelper(missingShareName, directoryPath).buildDirectoryClient();
    ShareStorageException e = assertThrows(ShareStorageException.class, client::create);
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
@Test
public void createDirectoryWithMetadata() {
    // Supplying metadata at creation time should still yield 201 Created.
    assertEquals(201, primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null)
        .getStatusCode());
}
@Test
public void createDirectoryWithFilePermission() {
    // Creating with an explicit SDDL permission: the service should echo back fully-populated
    // SMB properties, including a server-generated permission key.
    Response<ShareDirectoryInfo> resp =
        primaryDirectoryClient.createWithResponse(null, FILE_PERMISSION, null, null, null);
    assertEquals(201, resp.getStatusCode());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createDirectoryWithFilePermissionKey() {
    // Creating with a pre-registered permission key (instead of a raw SDDL string) should also
    // yield fully-populated SMB properties.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    Response<ShareDirectoryInfo> resp =
        primaryDirectoryClient.createWithResponse(smbProperties, null, null, null, null);
    assertEquals(201, resp.getStatusCode());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createDirectoryWithNtfsAttributes() {
    // Creating with explicit NTFS attributes plus a permission key should succeed and return
    // fully-populated SMB properties.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> attributes =
        EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey)
        .setNtfsFileAttributes(attributes);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createWithResponse(smbProperties, null, null, null,
        null);
    assertEquals(201, resp.getStatusCode());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void createChangeTime() {
    // A file change time supplied at creation should round-trip (within service time precision).
    OffsetDateTime changeTime = testResourceNamer.now();
    primaryDirectoryClient.createWithResponse(new FileSmbProperties().setFileChangeTime(changeTime), null, null,
        null, null);
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(
        primaryDirectoryClient.getProperties().getSmbProperties().getFileChangeTime(), changeTime));
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
    // Supplying both a permission key and a raw permission (or an oversized permission) is a
    // client-side validation error, not a service call.
    FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    assertThrows(IllegalArgumentException.class, () ->
        primaryDirectoryClient.createWithResponse(properties, permission, null, null, null));
}
// Invalid combinations: (key AND permission together), and (permission larger than the 8 KiB limit).
private static Stream<Arguments> permissionAndKeySupplier() {
    return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION),
        Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB))));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void createTrailingDot(boolean allowTrailingDot) {
    // When trailing dots are allowed the name is preserved verbatim; otherwise the service trims it.
    ShareClient shareClient = getShareClient(shareName, allowTrailingDot, null);
    ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient();
    String dirName = generatePathName();
    String dirNameWithDot = dirName + ".";
    ShareDirectoryClient dirClient = shareClient.getDirectoryClient(dirNameWithDot);
    dirClient.create();
    List<String> foundDirectories = new ArrayList<>();
    for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) {
        foundDirectories.add(fileRef.getName());
    }
    assertEquals(1, foundDirectories.size());
    if (allowTrailingDot) {
        assertEquals(dirNameWithDot, foundDirectories.get(0));
    } else {
        assertEquals(dirName, foundDirectories.get(0));
    }
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void createDirectoryOAuth() {
    // Directory creation over OAuth with BACKUP intent; verify identity accessors and the ETag header.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    Response<ShareDirectoryInfo> result = dirClient.createWithResponse(null, null, null, null, null);
    assertEquals(shareName, dirClient.getShareName());
    assertEquals(dirName, dirClient.getDirectoryPath());
    assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG));
}
@Test
public void createIfNotExistsDirectoryMin() {
    // Minimal overload: a non-null info object signals the directory was created.
    ShareDirectoryInfo info = primaryDirectoryClient.createIfNotExists();
    assertNotNull(info);
}
@Test
public void createIfNotExistsDirectory() {
    // First createIfNotExists on a fresh directory returns 201 Created.
    assertEquals(201, primaryDirectoryClient
        .createIfNotExistsWithResponse(new ShareDirectoryCreateOptions(), null, null).getStatusCode());
}
@Test
public void createIfNotExistsDirectoryError() {
    // createIfNotExists still fails when the parent share itself does not exist.
    String testShareName = generateShareName();
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> directoryBuilderHelper(testShareName, directoryPath).buildDirectoryClient().createIfNotExists());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.SHARE_NOT_FOUND);
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
    // Second createIfNotExists on the same path is a no-op reported as 409 Conflict.
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions();
    ShareDirectoryClient primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    Response<ShareDirectoryInfo> firstAttempt =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(firstAttempt, 201);
    Response<ShareDirectoryInfo> secondAttempt =
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(secondAttempt, 409);
}
@Test
public void createIfNotExistsDirectoryWithMetadata() {
    // Metadata supplied through the options bag should still yield 201 Created.
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setMetadata(testMetadata);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.createIfNotExistsWithResponse(options,
        null, null), 201);
}
@Test
public void createIfNotExistsDirectoryWithFilePermission() {
    // An SDDL permission through the options bag should be accepted and echoed back as
    // fully-populated SMB properties.
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 201);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createIfNotExistsDirectoryWithFilePermissionKey() {
    // A pre-registered permission key passed via SMB properties should be accepted and echoed back.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 201);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void createIfNotExistsDirectoryWithNtfsAttributes() {
    // Explicit NTFS attributes plus a permission key through the options bag should succeed.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> attributes = EnumSet.of(NtfsFileAttributes.HIDDEN, NtfsFileAttributes.DIRECTORY);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey)
        .setNtfsFileAttributes(attributes);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions().setSmbProperties(smbProperties);
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 201);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void createIfNotExistsDirectoryPermissionAndKeyError(String filePermissionKey, String permission) {
    // Same invalid key/permission combinations as the plain create path; validated client-side.
    FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    ShareDirectoryCreateOptions options = new ShareDirectoryCreateOptions()
        .setSmbProperties(properties)
        .setFilePermission(permission);
    assertThrows(IllegalArgumentException.class, () ->
        primaryDirectoryClient.createIfNotExistsWithResponse(options, null, null));
}
@Test
public void deleteDirectory() {
    // Deleting an existing directory returns 202 Accepted.
    primaryDirectoryClient.create();
    Response<Void> deleteResponse = primaryDirectoryClient.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(deleteResponse, 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteTrailingDot() {
    // With trailing dots allowed, delete must address the dotted name exactly.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(directoryClient.deleteWithResponse(null, null), 202);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteDirectoryOAuth() {
    // Delete over OAuth with BACKUP intent; also check the request id header is echoed.
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    Response<Void> response = dirClient.deleteWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
    assertNotNull(response.getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID));
}
@Test
public void deleteDirectoryError() {
    // Deleting a directory that was never created must fail with RESOURCE_NOT_FOUND.
    ShareStorageException e =
        assertThrows(ShareStorageException.class, primaryDirectoryClient::delete);
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsDirectory() {
    // deleteIfExists on an existing directory returns 202 Accepted.
    primaryDirectoryClient.create();
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.deleteIfExistsWithResponse(null, null),
        202);
}
@Test
public void deleteIfExistsDirectoryMin() {
    // The minimal overload reports true when the directory existed and was removed.
    primaryDirectoryClient.create();
    boolean deleted = primaryDirectoryClient.deleteIfExists();
    assertTrue(deleted);
}
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
    // Deleting a missing directory is a non-throwing no-op: value false, status 404.
    primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    Response<Boolean> response = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    assertFalse(response.getValue());
    FileShareTestHelper.assertResponseStatusCode(response, 404);
    assertFalse(primaryDirectoryClient.exists());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
    // First delete succeeds (202/true); repeating it is a non-throwing no-op (404/false).
    primaryDirectoryClient.create();
    Response<Boolean> firstDelete = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    Response<Boolean> secondDelete = primaryDirectoryClient.deleteIfExistsWithResponse(null, null);
    assertEquals(202, firstDelete.getStatusCode());
    assertTrue(firstDelete.getValue());
    assertEquals(404, secondDelete.getStatusCode());
    assertFalse(secondDelete.getValue());
}
@Test
public void getProperties() {
    // Properties of a created directory should include an ETag and fully-populated SMB properties.
    primaryDirectoryClient.create();
    Response<ShareDirectoryProperties> resp = primaryDirectoryClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getETag());
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
    // With trailing dots allowed, the properties of the dotted directory must match what create returned.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    ShareDirectoryInfo createResponse = directoryClient.createIfNotExists();
    Response<ShareDirectoryProperties> propertiesResponse = directoryClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(propertiesResponse, 200);
    assertEquals(createResponse.getETag(), propertiesResponse.getValue().getETag());
    assertEquals(createResponse.getLastModified(), propertiesResponse.getValue().getLastModified());
    // Field-by-field SMB property comparison between the create result and the properties fetch.
    FileSmbProperties createSmbProperties = createResponse.getSmbProperties();
    FileSmbProperties getPropertiesSmbProperties = propertiesResponse.getValue().getSmbProperties();
    assertEquals(createSmbProperties.getFilePermissionKey(), getPropertiesSmbProperties.getFilePermissionKey());
    assertEquals(createSmbProperties.getNtfsFileAttributes(), getPropertiesSmbProperties.getNtfsFileAttributes());
    assertEquals(createSmbProperties.getFileLastWriteTime(), getPropertiesSmbProperties.getFileLastWriteTime());
    assertEquals(createSmbProperties.getFileCreationTime(), getPropertiesSmbProperties.getFileCreationTime());
    assertEquals(createSmbProperties.getFileChangeTime(), getPropertiesSmbProperties.getFileChangeTime());
    assertEquals(createSmbProperties.getParentId(), getPropertiesSmbProperties.getParentId());
    assertEquals(createSmbProperties.getFileId(), getPropertiesSmbProperties.getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
    // Properties fetched over OAuth must match the info returned by create, field by field.
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    ShareDirectoryInfo createInfo = dirClient.create();
    ShareDirectoryProperties properties = dirClient.getProperties();
    assertEquals(createInfo.getETag(), properties.getETag());
    assertEquals(createInfo.getLastModified(), properties.getLastModified());
    assertEquals(createInfo.getSmbProperties().getFilePermissionKey(),
        properties.getSmbProperties().getFilePermissionKey());
    assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(),
        properties.getSmbProperties().getNtfsFileAttributes());
    assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(),
        properties.getSmbProperties().getFileLastWriteTime());
    assertEquals(createInfo.getSmbProperties().getFileCreationTime(),
        properties.getSmbProperties().getFileCreationTime());
    assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
        properties.getSmbProperties().getFileChangeTime());
    assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId());
    assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId());
}
@Test
public void getPropertiesError() {
    // Fetching properties of a non-existent directory surfaces RESOURCE_NOT_FOUND.
    ShareStorageException thrown = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.getPropertiesWithResponse(null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(thrown, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void setPropertiesFilePermission() {
    // Setting a raw SDDL permission after creation should return fully-populated SMB properties.
    primaryDirectoryClient.create();
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.setPropertiesWithResponse(null, FILE_PERMISSION,
        null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@Test
public void setPropertiesFilePermissionKey() {
    // Setting SMB properties that reference a pre-registered permission key should succeed.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.create();
    Response<ShareDirectoryInfo> resp = primaryDirectoryClient.setPropertiesWithResponse(smbProperties, null, null,
        null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void setHttpHeadersChangeTime() {
    // Setting only the file change time must round-trip (within service time precision).
    primaryDirectoryClient.create();
    OffsetDateTime changeTime = testResourceNamer.now();
    primaryDirectoryClient.setProperties(new FileSmbProperties().setFileChangeTime(changeTime), null);
    // BUG FIX: compareDatesWithPrecision's boolean result was previously discarded, so this test
    // could never fail; wrap it in assertTrue (matching the createChangeTime test above).
    assertTrue(FileShareTestHelper.compareDatesWithPrecision(primaryDirectoryClient.getProperties()
        .getSmbProperties().getFileChangeTime(), changeTime));
    // NOTE(review): removed dead setup that created a permission and mutated the shared
    // smbProperties field without ever passing it to setProperties.
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setHttpHeadersTrailingDot() {
    // With trailing dots allowed, setProperties must address the dotted directory successfully.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.createIfNotExists();
    Response<ShareDirectoryInfo> res = directoryClient.setPropertiesWithResponse(new FileSmbProperties(), null,
        null, null);
    FileShareTestHelper.assertResponseStatusCode(res, 200);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
    // setProperties over OAuth with BACKUP intent should succeed with 200 OK.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    Response<ShareDirectoryInfo> res = dirClient.setPropertiesWithResponse(new FileSmbProperties(), null, null,
        null);
    FileShareTestHelper.assertResponseStatusCode(res, 200);
}
@ParameterizedTest
@MethodSource("permissionAndKeySupplier")
public void setPropertiesError(String filePermissionKey, String permission) {
    // Same client-side key/permission validation as create, applied on the setProperties path.
    FileSmbProperties properties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
    primaryDirectoryClient.create();
    assertThrows(IllegalArgumentException.class, () ->
        primaryDirectoryClient.setPropertiesWithResponse(properties, permission, null, null));
}
@Test
public void setMetadata() {
    // setMetadata fully replaces the existing metadata map.
    primaryDirectoryClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties getPropertiesBefore = primaryDirectoryClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
        primaryDirectoryClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties getPropertiesAfter = primaryDirectoryClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setMetadataTrailingDot() {
    // Metadata replacement must work on a dotted directory name when trailing dots are allowed.
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient(generatePathName() + ".");
    directoryClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties getPropertiesBefore = directoryClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
        directoryClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties getPropertiesAfter = directoryClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setMetadataOAuth() {
    // Metadata replacement over OAuth with BACKUP intent.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.createWithResponse(null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareDirectoryProperties getPropertiesBefore = dirClient.getProperties();
    Response<ShareDirectorySetMetadataInfo> setPropertiesResponse =
        dirClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareDirectoryProperties getPropertiesAfter = dirClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}
@Test
public void setMetadataError() {
    // An empty metadata key is rejected by the service with EMPTY_METADATA_KEY.
    primaryDirectoryClient.create();
    Map<String, String> badMetadata = Collections.singletonMap("", "value");
    ShareStorageException thrown = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.setMetadata(badMetadata));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(thrown, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listFilesAndDirectories(String[] expectedFiles, String[] expectedDirectories) {
primaryDirectoryClient.create();
for (String expectedFile : expectedFiles) {
primaryDirectoryClient.createFile(expectedFile, 2);
}
for (String expectedDirectory : expectedDirectories) {
primaryDirectoryClient.createSubdirectory(expectedDirectory);
}
List<String> foundFiles = new ArrayList<>();
List<String> foundDirectories = new ArrayList<>();
for (ShareFileItem fileRef : primaryDirectoryClient.listFilesAndDirectories()) {
if (fileRef.isDirectory()) {
foundDirectories.add(fileRef.getName());
} else {
foundFiles.add(fileRef.getName());
}
}
assertArrayEquals(expectedFiles, foundFiles.toArray());
assertArrayEquals(expectedDirectories, foundDirectories.toArray());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
@ParameterizedTest
@MethodSource("listFilesAndDirectoriesArgsSupplier")
public void listFilesAndDirectoriesArgs(String extraPrefix, Integer maxResults, int numOfResults) {
    primaryDirectoryClient.create();
    List<String> nameList = new ArrayList<>();
    String dirPrefix = generatePathName();
    // Create 2 subdirectories (dirPrefix0, dirPrefix1), each containing 2 files
    // named dirPrefix3..dirPrefix6. Only the direct children of the parent are
    // expected back from the non-recursive listing below.
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDirClient.create();
        for (int j = 0; j < 2; j++) {
            int num = i * 2 + j + 3;
            subDirClient.createFile(dirPrefix + num, 1024);
        }
    }
    // One file directly in the parent: dirPrefix2.
    primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
    // Expected direct children in listing order: dirPrefix0, dirPrefix1, dirPrefix2.
    for (int i = 0; i < 3; i++) {
        nameList.add(dirPrefix + i);
    }
    // "noOp" extraPrefix matches nothing; maxResults only affects page size, not totals.
    Iterator<ShareFileItem> fileRefIter = primaryDirectoryClient
        .listFilesAndDirectories(prefix + extraPrefix, maxResults, null, null).iterator();
    for (int i = 0; i < numOfResults; i++) {
        assertEquals(nameList.get(i), fileRefIter.next().getName());
    }
    assertFalse(fileRefIter.hasNext());
}
// Arguments: extraPrefix appended to the shared test prefix, page-size maxResults,
// and the number of direct children the listing is expected to return.
private static Stream<Arguments> listFilesAndDirectoriesArgsSupplier() {
    return Stream.of(
        Arguments.of("", null, 3),    // no extra filter, default paging
        Arguments.of("", 1, 3),       // paging does not change the total
        Arguments.of("noOp", 3, 0));  // prefix that matches nothing
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
@ParameterizedTest
@CsvSource(value = {"false,false,false,false", "true,false,false,false", "false,true,false,false",
    "false,false,true,false", "false,false,false,true", "true,true,true,true"})
public void listFilesAndDirectoriesExtendedInfoArgs(boolean timestamps, boolean etag, boolean attributes,
    boolean permissionKey) {
    primaryDirectoryClient.create();
    List<String> nameList = new ArrayList<>();
    String dirPrefix = generatePathName();
    // Same layout as listFilesAndDirectoriesArgs: 2 subdirectories with 2 nested
    // files each, plus one file directly under the parent.
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient subDirClient = primaryDirectoryClient.getSubdirectoryClient(dirPrefix + i);
        subDirClient.create();
        for (int j = 0; j < 2; j++) {
            int num = i * 2 + j + 3;
            subDirClient.createFile(dirPrefix + num, 1024);
        }
    }
    primaryDirectoryClient.createFile(dirPrefix + 2, 1024);
    for (int i = 0; i < 3; i++) {
        nameList.add(dirPrefix + i);
    }
    // Each include-flag combination must still return the same names in the same
    // order; only the per-item detail payload varies.
    ShareListFilesAndDirectoriesOptions options = new ShareListFilesAndDirectoriesOptions()
        .setPrefix(prefix)
        .setIncludeExtendedInfo(true)
        .setIncludeTimestamps(timestamps)
        .setIncludeETag(etag)
        .setIncludeAttributes(attributes)
        .setIncludePermissionKey(permissionKey);
    List<ShareFileItem> returnedFileList = primaryDirectoryClient.listFilesAndDirectories(options, null, null)
        .stream().collect(Collectors.toList());
    for (int i = 0; i < nameList.size(); i++) {
        assertEquals(nameList.get(i), returnedFileList.get(i).getName());
    }
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-10-02")
public void listFilesAndDirectoriesExtendedInfoResults() {
    ShareDirectoryClient parentDir = primaryDirectoryClient;
    parentDir.create();
    ShareFileClient file = parentDir.createFile(generatePathName(), 1024);
    ShareDirectoryClient dir = parentDir.createSubdirectory(generatePathName());
    // Request every extended-info field so each populated property can be asserted.
    List<ShareFileItem> listResults = parentDir.listFilesAndDirectories(
        new ShareListFilesAndDirectoriesOptions()
            .setIncludeExtendedInfo(true)
            .setIncludeTimestamps(true)
            .setIncludePermissionKey(true)
            .setIncludeETag(true)
            .setIncludeAttributes(true),
        null, null)
        .stream().collect(Collectors.toList());
    // The service does not guarantee which of the two items comes first, so
    // identify them by the isDirectory flag rather than by position.
    ShareFileItem dirListItem;
    ShareFileItem fileListItem;
    if (listResults.get(0).isDirectory()) {
        dirListItem = listResults.get(0);
        fileListItem = listResults.get(1);
    } else {
        dirListItem = listResults.get(1);
        fileListItem = listResults.get(0);
    }
    // Directory item: name, handle id, DIRECTORY attribute, permission key, and all
    // timestamp/ETag properties must be present and non-blank.
    assertEquals(dirListItem.getName(), new File(dir.getDirectoryPath()).getName());
    assertTrue(dirListItem.isDirectory());
    assertNotNull(dirListItem.getId());
    assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getId()));
    assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), dirListItem.getFileAttributes());
    assertNotNull(dirListItem.getPermissionKey());
    assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getPermissionKey()));
    assertNotNull(dirListItem.getProperties().getCreatedOn());
    assertNotNull(dirListItem.getProperties().getLastAccessedOn());
    assertNotNull(dirListItem.getProperties().getLastWrittenOn());
    assertNotNull(dirListItem.getProperties().getChangedOn());
    assertNotNull(dirListItem.getProperties().getLastModified());
    assertNotNull(dirListItem.getProperties().getETag());
    assertFalse(FileShareTestHelper.isAllWhitespace(dirListItem.getProperties().getETag()));
    // File item: same checks, but new files carry the ARCHIVE attribute.
    assertEquals(fileListItem.getName(), new File(file.getFilePath()).getName());
    assertFalse(fileListItem.isDirectory());
    assertNotNull(fileListItem.getId());
    assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getId()));
    assertEquals(EnumSet.of(NtfsFileAttributes.ARCHIVE), fileListItem.getFileAttributes());
    assertNotNull(fileListItem.getPermissionKey());
    assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getPermissionKey()));
    assertNotNull(fileListItem.getProperties().getCreatedOn());
    assertNotNull(fileListItem.getProperties().getLastAccessedOn());
    assertNotNull(fileListItem.getProperties().getLastWrittenOn());
    assertNotNull(fileListItem.getProperties().getChangedOn());
    assertNotNull(fileListItem.getProperties().getLastModified());
    assertNotNull(fileListItem.getProperties().getETag());
    assertFalse(FileShareTestHelper.isAllWhitespace(fileListItem.getProperties().getETag()));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncoded() {
    // U+FFFE is invalid in raw XML, so these names require the service-side
    // name encoding introduced in 2021-12-02.
    String specialCharDirectoryName = "directory\uFFFE";
    String specialCharFileName = "file\uFFFE";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
    primaryDirectoryClient.createFile(specialCharFileName, 1024);
    List<ShareFileItem> items = primaryDirectoryClient.listFilesAndDirectories().stream()
        .collect(Collectors.toList());
    // Directories are listed before files; both names must round-trip intact.
    assertEquals(2, items.size());
    ShareFileItem first = items.get(0);
    ShareFileItem second = items.get(1);
    assertTrue(first.isDirectory());
    assertEquals(specialCharDirectoryName, first.getName());
    assertFalse(second.isDirectory());
    assertEquals(specialCharFileName, second.getName());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncodedContinuationToken() {
    // Names containing U+FFFE must survive being embedded in continuation tokens.
    String specialCharFileName0 = "file0\uFFFE";
    String specialCharFileName1 = "file1\uFFFE";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(specialCharFileName0, 1024);
    primaryDirectoryClient.createFile(specialCharFileName1, 1024);
    // Page size 1 forces a continuation token between the two results.
    List<ShareFileItem> collected = new ArrayList<>();
    for (PagedResponse<ShareFileItem> page : primaryDirectoryClient.listFilesAndDirectories().iterableByPage(1)) {
        collected.addAll(page.getValue());
    }
    assertEquals(specialCharFileName0, collected.get(0).getName());
    assertEquals(specialCharFileName1, collected.get(1).getName());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-12-02")
public void listFilesAndDirectoriesEncodedPrefix() {
    // NOTE(review): despite the test name, no prefix is passed to
    // listFilesAndDirectories() below — the listing is unfiltered. Presumably the
    // intent was to exercise an encoded prefix parameter; confirm against the
    // matching async test or the service spec.
    String specialCharDirectoryName = "directory\uFFFE";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(specialCharDirectoryName);
    List<ShareFileItem> shareFileItems = primaryDirectoryClient.listFilesAndDirectories().stream()
        .collect(Collectors.toList());
    // Only the one subdirectory exists, and its U+FFFE name must round-trip.
    assertEquals(1, shareFileItems.size());
    assertTrue(shareFileItems.get(0).isDirectory());
    assertEquals(specialCharDirectoryName, shareFileItems.get(0).getName());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void testListFilesAndDirectoriesOAuth() {
    // Listing must also work through an OAuth (token-intent) client.
    ShareDirectoryClient dirClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP))
        .getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    // Generate all names first (recorder-order sensitive), then create the entries.
    List<String> fileNames = new ArrayList<>();
    List<String> dirNames = new ArrayList<>();
    for (int i = 0; i < 11; i++) {
        fileNames.add(generatePathName());
    }
    for (int i = 0; i < 5; i++) {
        dirNames.add(generatePathName());
    }
    for (String name : fileNames) {
        dirClient.createFile(name, Constants.KB);
    }
    for (String name : dirNames) {
        dirClient.createSubdirectory(name);
    }
    // Split the listing into files and directories, then verify containment.
    List<String> listedFiles = new ArrayList<>();
    List<String> listedDirs = new ArrayList<>();
    for (ShareFileItem item : dirClient.listFilesAndDirectories()) {
        if (item.isDirectory()) {
            listedDirs.add(item.getName());
        } else {
            listedFiles.add(item.getName());
        }
    }
    assertTrue(fileNames.containsAll(listedFiles));
    assertTrue(dirNames.containsAll(listedDirs));
}
@Test
public void listMaxResultsByPage() {
    primaryDirectoryClient.create();
    String namePrefix = generatePathName();
    // Two subdirectories, each with two nested files — the nested files are not
    // returned by the non-recursive listing, but the setup matches sibling tests.
    for (int i = 0; i < 2; i++) {
        ShareDirectoryClient child = primaryDirectoryClient.getSubdirectoryClient(namePrefix + i);
        child.create();
        for (int j = 0; j < 2; j++) {
            child.createFile(namePrefix + (i * 2 + j + 3), 1024);
        }
    }
    // With a per-page limit of 1, every returned page must hold exactly one item.
    for (PagedResponse<ShareFileItem> page
        : primaryDirectoryClient.listFilesAndDirectories(prefix, null, null, null).iterableByPage(1)) {
        assertEquals(1, page.getValue().size());
    }
}
@ParameterizedTest
@MethodSource("listHandlesSupplier")
public void listHandles(Integer maxResults, boolean recursive) {
    primaryDirectoryClient.create();
    // A freshly created directory has no open SMB handles regardless of paging
    // or recursion settings.
    List<HandleItem> openHandles = primaryDirectoryClient.listHandles(maxResults, recursive, null, null)
        .stream().collect(Collectors.toList());
    assertEquals(0, openHandles.size());
}
// Arguments: page-size maxResults and whether to recurse into subdirectories.
private static Stream<Arguments> listHandlesSupplier() {
    return Stream.of(
        Arguments.of(2, true),       // paged, recursive
        Arguments.of(null, false));  // default paging, non-recursive
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void listHandlesTrailingDot() {
    // Directory names ending in "." require allowTrailingDot support (2022-11-02).
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient dirWithDot = shareClient.getDirectoryClient(generatePathName() + ".");
    dirWithDot.create();
    List<HandleItem> openHandles = dirWithDot.listHandles(null, false, null, null)
        .stream().collect(Collectors.toList());
    // No handles are open on a brand-new directory.
    assertEquals(0, openHandles.size());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void listHandlesOAuth() {
    // Handle listing must also work through an OAuth (token-intent) client.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    List<HandleItem> openHandles = dirClient.listHandles(2, true, null, null)
        .stream().collect(Collectors.toList());
    assertEquals(0, openHandles.size());
}
@Test
public void listHandlesError() {
    // The directory is never created, so enumerating handles must 404.
    Exception ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.listHandles(null, true, null, null).iterator().hasNext());
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
public void forceCloseHandleMin() {
    primaryDirectoryClient.create();
    // Closing a handle id that is not open is a no-op: zero closed, zero failed.
    CloseHandlesInfo closeInfo = primaryDirectoryClient.forceCloseHandle("1");
    assertEquals(0, closeInfo.getClosedHandles());
    assertEquals(0, closeInfo.getFailedHandles());
}
@Test
public void forceCloseHandleInvalidHandleId() {
    primaryDirectoryClient.create();
    // A non-numeric handle id is rejected by the service.
    assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.forceCloseHandle("invalidHandleId"));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void forceCloseHandleOAuth() {
    // Force-close must also work through an OAuth (token-intent) client.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    CloseHandlesInfo closeInfo = dirClient.forceCloseHandle("1");
    // Nothing is open, so nothing is closed and nothing fails.
    assertEquals(0, closeInfo.getClosedHandles());
    assertEquals(0, closeInfo.getFailedHandles());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07")
public void forceCloseAllHandlesMin() {
    primaryDirectoryClient.create();
    // No handles are open, so the bulk close reports zero closed and zero failed.
    CloseHandlesInfo closeInfo = primaryDirectoryClient.forceCloseAllHandles(false, null, null);
    assertEquals(0, closeInfo.getClosedHandles());
    assertEquals(0, closeInfo.getFailedHandles());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void forceCloseAllHandlesTrailingDot() {
    // Bulk handle close on a trailing-dot directory name (allowTrailingDot client).
    shareClient = getShareClient(shareName, true, null);
    ShareDirectoryClient dirWithDot = shareClient.getDirectoryClient(generatePathName() + ".");
    dirWithDot.create();
    CloseHandlesInfo closeInfo = dirWithDot.forceCloseAllHandles(false, null, null);
    assertEquals(0, closeInfo.getClosedHandles());
    assertEquals(0, closeInfo.getFailedHandles());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameMin() {
    primaryDirectoryClient.create();
    // Minimal rename: succeeds without throwing.
    assertDoesNotThrow(() -> primaryDirectoryClient.rename(generatePathName()));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameWithResponse() {
    primaryDirectoryClient.create();
    ShareDirectoryClient destination = primaryDirectoryClient.renameWithResponse(
        new ShareFileRenameOptions(generatePathName()), null, null).getValue();
    // The destination now exists; the source path no longer resolves.
    assertDoesNotThrow(destination::getProperties);
    assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.getProperties());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDifferentDirectory() {
    primaryDirectoryClient.create();
    // Rename into a different (pre-existing) parent directory on the same share.
    ShareDirectoryClient targetParent = shareClient.getDirectoryClient(generatePathName());
    targetParent.create();
    String destinationPath = targetParent.getFileClient(generatePathName()).getFilePath();
    ShareDirectoryClient renamed = primaryDirectoryClient.rename(destinationPath);
    // Destination exists, source is gone, and the returned client points at the new path.
    assertTrue(renamed.exists());
    assertFalse(primaryDirectoryClient.exists());
    assertEquals(destinationPath, renamed.getDirectoryPath());
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void renameReplaceIfExists(boolean replaceIfExists) {
    primaryDirectoryClient.create();
    // The destination path is already occupied by a file.
    ShareFileClient destination = shareClient.getFileClient(generatePathName());
    destination.create(512L);
    // The rename succeeds exactly when replaceIfExists is set.
    boolean renameSucceeded = true;
    try {
        primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath())
            .setReplaceIfExists(replaceIfExists), null, null);
    } catch (ShareStorageException ignored) {
        renameSucceeded = false;
    }
    assertEquals(replaceIfExists, renameSucceeded);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void renameIgnoreReadOnly(boolean ignoreReadOnly) {
    primaryDirectoryClient.create();
    // Occupy the destination with a READ_ONLY file; replacing it is only allowed
    // when ignoreReadOnly is set.
    FileSmbProperties readOnlyProps = new FileSmbProperties().setNtfsFileAttributes(
        EnumSet.of(NtfsFileAttributes.READ_ONLY));
    ShareFileClient destinationFile = shareClient.getFileClient(generatePathName());
    destinationFile.createWithResponse(512L, null, readOnlyProps, null, null, null, null, null);
    ShareFileRenameOptions renameOptions = new ShareFileRenameOptions(destinationFile.getFilePath())
        .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true);
    boolean threw = false;
    try {
        primaryDirectoryClient.renameWithResponse(renameOptions, null, null);
    } catch (ShareStorageException ignored) {
        threw = true;
    }
    // The rename fails exactly when the read-only destination is NOT ignored.
    assertEquals(!ignoreReadOnly, threw);
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFilePermission() {
    primaryDirectoryClient.create();
    // SDDL permission descriptor passed inline with the rename request.
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
        .setFilePermission(filePermission);
    ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
    // The service stores the permission and exposes it via a generated key.
    assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFilePermissionAndKeySet() {
    primaryDirectoryClient.create();
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    // Supplying BOTH an inline permission and a permission key is invalid — the
    // two are mutually exclusive, so the rename must be rejected.
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
        .setFilePermission(filePermission)
        .setSmbProperties(new FileSmbProperties()
            .setFilePermissionKey("filePermissionkey"));
    assertThrows(ShareStorageException.class, () ->
        primaryDirectoryClient.renameWithResponse(options, null, null).getValue());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameFileSmbProperties() {
    primaryDirectoryClient.create();
    String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)";
    // Register the permission up front and reference it by key in the SMB properties.
    String permissionKey = shareClient.createPermission(filePermission);
    FileSmbProperties smbProperties = new FileSmbProperties()
        .setFilePermissionKey(permissionKey)
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.DIRECTORY))
        .setFileCreationTime(testResourceNamer.now().minusDays(5))
        .setFileLastWriteTime(testResourceNamer.now().minusYears(2))
        .setFileChangeTime(testResourceNamer.now());
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName()).setSmbProperties(smbProperties);
    ShareDirectoryClient destClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
    // The destination must carry the attributes/timestamps supplied with the rename.
    FileSmbProperties destSmbProperties = destClient.getProperties().getSmbProperties();
    assertEquals(EnumSet.of(NtfsFileAttributes.DIRECTORY), destSmbProperties.getNtfsFileAttributes());
    assertNotNull(destSmbProperties.getFileCreationTime());
    assertNotNull(destSmbProperties.getFileLastWriteTime());
    // Change time is compared with reduced precision to tolerate clock skew.
    FileShareTestHelper.compareDatesWithPrecision(destSmbProperties.getFileChangeTime(), testResourceNamer.now());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameMetadata() {
    primaryDirectoryClient.create();
    // Metadata supplied with the rename must land on the destination directory.
    String metadataKey = "update";
    String metadataValue = "value";
    ShareFileRenameOptions options = new ShareFileRenameOptions(generatePathName())
        .setMetadata(Collections.singletonMap(metadataKey, metadataValue));
    ShareDirectoryClient renamedClient = primaryDirectoryClient.renameWithResponse(options, null, null).getValue();
    ShareDirectoryProperties properties = renamedClient.getProperties();
    assertNotNull(properties.getMetadata().get(metadataKey));
    assertEquals(metadataValue, renamedClient.getProperties().getMetadata().get(metadataKey));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameOAuth() {
    // Rename must also work through an OAuth (token-intent) client.
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String destinationName = generatePathName();
    ShareDirectoryClient renamedClient = dirClient
        .renameWithResponse(new ShareFileRenameOptions(destinationName), null, null).getValue();
    // The destination resolves under its new path; the source no longer exists.
    assertDoesNotThrow(renamedClient::getProperties);
    assertEquals(destinationName, renamedClient.getDirectoryPath());
    assertThrows(ShareStorageException.class, dirClient::getProperties);
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameError() {
    // Point the client at a directory that was never created; rename must fail.
    primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    assertThrows(ShareStorageException.class, () -> primaryDirectoryClient.rename(generatePathName()));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDestAC() {
    primaryDirectoryClient.create();
    // Occupy the destination with a file and take a lease on it; a rename that
    // presents the matching lease id as a destination access condition must succeed.
    String pathName = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(pathName);
    destFile.create(512);
    String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions().setLeaseId(leaseID);
    FileShareTestHelper.assertResponseStatusCode(primaryDirectoryClient.renameWithResponse(
        new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null,
        null), 200);
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
public void renameDestACFail() {
    primaryDirectoryClient.create();
    // Occupy the destination with a leased file, then present a GARBAGE lease id
    // as the destination access condition — the rename must be rejected.
    String pathName = generatePathName();
    ShareFileClient destFile = shareClient.getFileClient(pathName);
    destFile.create(512);
    setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID);
    ShareRequestConditions src = new ShareRequestConditions().setLeaseId(GARBAGE_LEASE_ID);
    // FIX: assert the specific service exception rather than the over-broad
    // RuntimeException, consistent with the other access-condition tests.
    assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.renameWithResponse(new ShareFileRenameOptions(pathName)
            .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null));
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12")
public void testRenameSASToken() {
    // Build a share SAS with enough rights to read/write/create/delete, which
    // rename requires on both source and destination.
    ShareFileSasPermission permissions = new ShareFileSasPermission()
        .setReadPermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setDeletePermission(true);
    ShareServiceSasSignatureValues sasValues =
        new ShareServiceSasSignatureValues(testResourceNamer.now().plusDays(1), permissions);
    String sas = shareClient.generateSas(sasValues);
    ShareDirectoryClient sasClient = getDirectoryClient(sas, primaryDirectoryClient.getDirectoryUrl());
    primaryDirectoryClient.create();
    String directoryName = generatePathName();
    ShareDirectoryClient destClient = sasClient.rename(directoryName);
    // The SAS-authenticated rename produced a reachable destination.
    assertNotNull(destClient);
    destClient.getProperties();
    assertEquals(directoryName, destClient.getDirectoryPath());
}
@Test
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
public void renameTrailingDot() {
    // Trailing-dot names on both source and destination (allowTrailingDot client).
    shareClient = getShareClient(shareName, true, true);
    String directoryName = generatePathName() + ".";
    ShareDirectoryClient dirWithDot = shareClient.getDirectoryClient(directoryName);
    dirWithDot.create();
    // Renaming onto the same trailing-dot name must not throw.
    assertDoesNotThrow(() -> dirWithDot.rename(directoryName));
}
@Test
public void createSubDirectory() {
    primaryDirectoryClient.create();
    // Creating a child directory returns 201 Created.
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryWithResponse(
        "testCreateSubDirectory", null, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createSubDirectoryInvalidName() {
    primaryDirectoryClient.create();
    // "test/subdirectory" implies an intermediate "test" directory that does not
    // exist, so the service answers 404 PARENT_NOT_FOUND.
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectory("test/subdirectory"));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.PARENT_NOT_FOUND);
}
@Test
public void createSubDirectoryMetadata() {
    primaryDirectoryClient.create();
    // Subdirectory creation with initial metadata also returns 201.
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryWithResponse(
        "testCreateSubDirectory", null, null, testMetadata, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createSubDirectoryMetadataError() {
    primaryDirectoryClient.create();
    // An empty metadata key is invalid and rejected with 400 EMPTY_METADATA_KEY.
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectoryWithResponse("testsubdirectory", null, null,
            Collections.singletonMap("", "value"), null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@Test
public void createSubDirectoryFilePermission() {
    primaryDirectoryClient.create();
    // Subdirectory creation with an inline SDDL permission returns 201.
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryWithResponse(
        "testCreateSubDirectory", null, FILE_PERMISSION, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createSubDirectoryFilePermissionKey() {
    primaryDirectoryClient.create();
    // Register the permission first, then reference it by key via SMB properties.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryWithResponse(
        "testCreateSubDirectory", smbProperties, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createIfNotExistsSubDirectory() {
    primaryDirectoryClient.create();
    // No such subdirectory yet, so the conditional create performs it: 201.
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions(), null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createIfNotExistsSubDirectoryAlreadyExists() {
    String subdirectoryName = generatePathName();
    primaryDirectoryClient = shareClient.getDirectoryClient(generatePathName());
    primaryDirectoryClient.create();
    // First conditional create performs the creation (201); the second finds the
    // subdirectory already present and reports 409 without failing.
    Response<ShareDirectoryClient> firstAttempt = primaryDirectoryClient
        .createSubdirectoryIfNotExistsWithResponse(subdirectoryName, new ShareDirectoryCreateOptions(), null, null);
    Response<ShareDirectoryClient> secondAttempt = primaryDirectoryClient
        .createSubdirectoryIfNotExistsWithResponse(subdirectoryName, new ShareDirectoryCreateOptions(), null, null);
    assertEquals(201, firstAttempt.getStatusCode());
    assertEquals(409, secondAttempt.getStatusCode());
}
@Test
public void createIfNotExistsSubDirectoryInvalidName() {
    primaryDirectoryClient.create();
    // The slash implies a missing intermediate directory: 404 PARENT_NOT_FOUND.
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectoryIfNotExists("test/subdirectory"));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.PARENT_NOT_FOUND);
}
@Test
public void createIfNotExistsSubDirectoryMetadata() {
    primaryDirectoryClient.create();
    // Conditional create with initial metadata returns 201.
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions().setMetadata(testMetadata), null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void createIfNotExistsSubDirectoryMetadataError() {
    primaryDirectoryClient.create();
    // An empty metadata key is rejected with 400 EMPTY_METADATA_KEY even on the
    // conditional-create path.
    ShareDirectoryCreateOptions badOptions = new ShareDirectoryCreateOptions()
        .setMetadata(Collections.singletonMap("", "value"));
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
            "testsubdirectory", badOptions, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}
@Test
public void createIfNotExistsSubDirectoryFilePermission() {
    primaryDirectoryClient.create();
    // Conditional create with an inline SDDL permission returns 201.
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION), null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void testCreateIfNotExistsSubDirectoryFilePermissionKey() {
    primaryDirectoryClient.create();
    // Register the permission first, then reference it by key via SMB properties.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    Response<ShareDirectoryClient> response = primaryDirectoryClient.createSubdirectoryIfNotExistsWithResponse(
        "testCreateSubDirectory", new ShareDirectoryCreateOptions().setSmbProperties(smbProperties), null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@Test
public void testDeleteSubDirectory() {
    String subDirectoryName = "testSubCreateDirectory";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(subDirectoryName);
    // Deleting an existing subdirectory returns 202 Accepted.
    Response<Void> response = primaryDirectoryClient.deleteSubdirectoryWithResponse(subDirectoryName, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@Test
public void deleteSubDirectoryError() {
    primaryDirectoryClient.create();
    // Deleting a subdirectory that was never created fails with 404.
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.deleteSubdirectory("testsubdirectory"));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsSubDirectory() {
    String subDirectoryName = "testSubCreateDirectory";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(subDirectoryName);
    // The subdirectory exists, so the conditional delete performs it: 202.
    Response<Boolean> response = primaryDirectoryClient
        .deleteSubdirectoryIfExistsWithResponse(subDirectoryName, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@Test
public void deleteIfExistsSubDirectoryMin() {
    String subDirectoryName = "testSubCreateDirectory";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createSubdirectory(subDirectoryName);
    // The simple overload reports true when the deletion actually happened.
    boolean deleted = primaryDirectoryClient.deleteSubdirectoryIfExists(subDirectoryName);
    assertTrue(deleted);
}
@Test
public void deleteIfExistsSubDirectoryThatDoesNotExist() {
    primaryDirectoryClient.create();
    // Nothing to delete: status is 404 and the value reports false, no exception.
    Response<Boolean> response = primaryDirectoryClient
        .deleteSubdirectoryIfExistsWithResponse("testsubdirectory", null, null);
    assertEquals(404, response.getStatusCode());
    assertFalse(response.getValue());
}
@Test
public void createFile() {
    primaryDirectoryClient.create();
    // Creating a file inside the directory returns 201 Created.
    Response<ShareFileClient> response = primaryDirectoryClient.createFileWithResponse(
        "testCreateFile", 1024, null, null, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, null, null, null, null, null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, statusCode, errMsg);
}
@Test
public void createFileMaxOverload() {
    primaryDirectoryClient.create();
    // Exercise the fullest createFile overload: headers, SMB properties, inline
    // permission, and metadata all at once.
    ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    Response<ShareFileClient> response = primaryDirectoryClient.createFileWithResponse(
        "testCreateFile", 1024, httpHeaders, smbProperties, FILE_PERMISSION, testMetadata, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 201);
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryDirectoryClient.create();
ShareStorageException e = assertThrows(ShareStorageException.class,
() -> primaryDirectoryClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null, metadata,
null, null));
FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, errMsg);
}
@Test
public void deleteFile() {
    String fileName = "testCreateFile";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(fileName, 1024);
    // Deleting an existing file returns 202 Accepted.
    Response<Void> response = primaryDirectoryClient.deleteFileWithResponse(fileName, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@Test
public void deleteFileError() {
    primaryDirectoryClient.create();
    // Deleting a file that was never created fails with 404.
    ShareStorageException ex = assertThrows(ShareStorageException.class,
        () -> primaryDirectoryClient.deleteFileWithResponse("testfile", null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(ex, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}
@Test
public void deleteIfExistsFileMin() {
    String fileName = "testCreateFile";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(fileName, 1024);
    // The simple overload reports true when the deletion actually happened.
    boolean deleted = primaryDirectoryClient.deleteFileIfExists(fileName);
    assertTrue(deleted);
}
@Test
public void deleteIfExistsFile() {
    String fileName = "testCreateFile";
    primaryDirectoryClient.create();
    primaryDirectoryClient.createFile(fileName, 1024);
    // The file exists, so the conditional delete performs it: 202.
    Response<Boolean> response = primaryDirectoryClient.deleteFileIfExistsWithResponse(fileName, null, null);
    FileShareTestHelper.assertResponseStatusCode(response, 202);
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
primaryDirectoryClient.create();
Response<Boolean> response = primaryDirectoryClient.deleteFileIfExistsWithResponse("testfile", null, null);
assertEquals(404, response.getStatusCode());
assertFalse(response.getValue());
}
@Test
public void getSnapshotId() {
String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
ShareDirectoryClient shareSnapshotClient = directoryBuilderHelper(shareName, directoryPath).snapshot(snapshot)
.buildDirectoryClient();
assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
@Test
public void getShareName() {
assertEquals(shareName, primaryDirectoryClient.getShareName());
}
@Test
public void getDirectoryPath() {
assertEquals(directoryPath, primaryDirectoryClient.getDirectoryPath());
}
@Test
public void testPerCallPolicy() {
primaryDirectoryClient.create();
ShareDirectoryClient directoryClient = directoryBuilderHelper(primaryDirectoryClient.getShareName(),
primaryDirectoryClient.getDirectoryPath())
.addPolicy(getPerCallVersionPolicy()).buildDirectoryClient();
Response<ShareDirectoryProperties> response = directoryClient.getPropertiesWithResponse(null, null);
assertDoesNotThrow(() -> response.getHeaders().getValue("x-ms-version").equals("2017-11-09"));
}
@ParameterizedTest
@ValueSource(strings = {"", "/"})
public void rootDirectorySupport(String rootDirPath) {
String dir1Name = "dir1";
String dir2Name = "dir2";
shareClient.createDirectory(dir1Name).createSubdirectory(dir2Name);
ShareDirectoryClient rootDirectory = shareClient.getDirectoryClient(rootDirPath);
assertTrue(rootDirectory.exists());
assertTrue(rootDirectory.getSubdirectoryClient(dir1Name).exists());
}
@Test
public void createShareWithSmallTimeoutsFailForServiceClient() {
int maxRetries = 5;
long retryDelayMillis = 1000;
for (int i = 0; i < maxRetries; i++) {
try {
HttpClientOptions clientOptions = new HttpClientOptions()
.setApplicationId("client-options-id")
.setResponseTimeout(Duration.ofNanos(1))
.setReadTimeout(Duration.ofNanos(1))
.setWriteTimeout(Duration.ofNanos(1))
.setConnectTimeout(Duration.ofNanos(1));
ShareServiceClientBuilder clientBuilder = new ShareServiceClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.retryOptions(new RequestRetryOptions(null, 1, (Integer) null, null, null, null))
.clientOptions(clientOptions);
ShareServiceClient serviceClient = clientBuilder.buildClient();
assertThrows(RuntimeException.class, () -> serviceClient.createShareWithResponse(generateShareName(),
null, Duration.ofSeconds(10), null));
return;
} catch (Exception e) {
try {
Thread.sleep(retryDelayMillis);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}
}
@Test
public void defaultAudience() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(null) /* should default to "https:
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
@Test
public void storageAccountAudience() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience(primaryDirectoryClient.getAccountName())));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
@Test
public void audienceError() {
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience("badAudience")));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
ShareStorageException e = assertThrows(ShareStorageException.class, aadDirClient::exists);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
String dirName = generatePathName();
ShareDirectoryClient dirClient = directoryBuilderHelper(shareName, dirName).buildDirectoryClient();
dirClient.create();
ShareServiceClient oAuthServiceClient =
getOAuthServiceClient(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience));
ShareDirectoryClient aadDirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
assertTrue(aadDirClient.exists());
}
} |
Should we start with `false` as the default and not log redacted headers? | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.enableRedactedHeaderLogging = true;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.enableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingEnabled();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | this.enableRedactedHeaderLogging = true; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.disableRedactedHeaderLogging = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.disableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingDisabled();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final boolean enableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
.then(next.process())
.flatMap(response -> responseLogger.logResponse(logger,
getResponseLoggingOptions(response, startNs, context)))
.doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.processSync();
}
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
try {
HttpResponse response = next.processSync();
if (response != null) {
response = responseLogger.logResponseSync(
logger, getResponseLoggingOptions(response, startNs, context));
}
return response;
} catch (RuntimeException e) {
createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
.log("HTTP FAILED", e);
throw e;
}
}
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
LoggingEventBuilder log = logger.atLevel(level);
if (LOGGER.canLogAtLevel(level) && request != null) {
if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
if (clientRequestId != null) {
log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
}
}
if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
String traceparent = request.getHeaders().getValue(TRACEPARENT);
if (traceparent != null) {
log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
}
}
}
return log;
}
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
return new HttpRequestLoggingContext(callContext.getHttpRequest(),
callContext.getContext(),
getRequestRetryCount(callContext.getContext()));
}
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
HttpPipelineCallContext callContext) {
return new HttpResponseLoggingContext(httpResponse, Duration.ofNanos(System.nanoTime() - startNs),
callContext.getContext(),
getRequestRetryCount(callContext.getContext()));
}
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, enableRedactedHeaderLogging);
}
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data))
.log(REQUEST_LOG_MESSAGE);
}
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
}
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, enableRedactedHeaderLogging);
}
}
private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
}
}
private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody);
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
String query = url.getQuery();
if (CoreUtils.isNullOrEmpty(query)) {
return url.toString();
}
UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false);
CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
if (allowedQueryParameterNames.contains(queryParam.getKey().toLowerCase(Locale.ROOT))) {
urlBuilder.addQueryParameter(queryParam.getKey(), queryParam.getValue());
} else {
urlBuilder.addQueryParameter(queryParam.getKey(), REDACTED_PLACEHOLDER);
}
});
return urlBuilder.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
* @param disableRedactedHeaderLogging Flag indicating if redacted headers should be logged.
*/
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
LoggingEventBuilder logBuilder, boolean enableRedactedHeaderLogging) {
HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((key, value) -> {
boolean isAllowed = allowedHeaderNames.contains(key);
if (isAllowed || enableRedactedHeaderLogging) {
logBuilder.addKeyValue(value.getName(), isAllowed ? value.getValue() : REDACTED_PLACEHOLDER);
}
});
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
String body) {
String result = body;
if (prettyPrintBody && contentType != null
&& (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
try {
final Object deserialized = PRETTY_PRINTER.readTree(body);
result = PRETTY_PRINTER.writeValueAsString(deserialized);
} catch (Exception e) {
logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
}
}
return result;
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
long contentLength = 0;
String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException e) {
logger.log(LogLevel.INFORMATIONAL,
() -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e);
}
return contentLength;
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)
&& contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
private static Integer getRequestRetryCount(Context context) {
Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
if (rawRetryCount == null) {
return null;
}
try {
return Integer.valueOf(rawRetryCount.toString());
} catch (NumberFormatException ex) {
LOGGER.atInfo()
.addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
.log("Could not parse the request retry count.");
return null;
}
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
private static ClientLogger getOrCreateMethodLogger(String methodName) {
if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
CALLER_METHOD_LOGGER_CACHE.clear();
}
return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
switch (logLevel) {
case ERROR:
return logger.atError();
case WARNING:
return logger.atWarning();
case INFORMATIONAL:
return logger.atInfo();
case VERBOSE:
default:
return logger.atVerbose();
}
}
private static final class LoggingHttpResponse extends HttpResponse {
private final HttpResponse actualResponse;
private final LoggingEventBuilder logBuilder;
private final int contentLength;
private final ClientLogger logger;
private final boolean prettyPrintBody;
private final String contentTypeHeader;
private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
ClientLogger logger, int contentLength, String contentTypeHeader,
boolean prettyPrintBody) {
super(actualResponse.getRequest());
this.actualResponse = actualResponse;
this.logBuilder = logBuilder;
this.logger = logger;
this.contentLength = contentLength;
this.contentTypeHeader = contentTypeHeader;
this.prettyPrintBody = prettyPrintBody;
}
@Override
public int getStatusCode() {
return actualResponse.getStatusCode();
}
@Override
@Deprecated
public String getHeaderValue(String name) {
return actualResponse.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return actualResponse.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return actualResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
return Flux.using(() -> stream, s -> actualResponse.getBody()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> doLog(s.toString(StandardCharsets.UTF_8)));
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(String::new);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BinaryData getBodyAsBinaryData() {
BinaryData content = actualResponse.getBodyAsBinaryData();
doLog(content.toString());
return content;
}
@Override
public void close() {
actualResponse.close();
}
private void doLog(String body) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
.log(RESPONSE_LOG_MESSAGE);
}
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final boolean disableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
.then(next.process())
.flatMap(response -> responseLogger.logResponse(logger,
getResponseLoggingOptions(response, startNs, context)))
.doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.processSync();
}
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
try {
HttpResponse response = next.processSync();
if (response != null) {
response = responseLogger.logResponseSync(
logger, getResponseLoggingOptions(response, startNs, context));
}
return response;
} catch (RuntimeException e) {
createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
.log("HTTP FAILED", e);
throw e;
}
}
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
LoggingEventBuilder log = logger.atLevel(level);
if (LOGGER.canLogAtLevel(level) && request != null) {
if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
if (clientRequestId != null) {
log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
}
}
if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
String traceparent = request.getHeaders().getValue(TRACEPARENT);
if (traceparent != null) {
log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
}
}
}
return log;
}
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
return new HttpRequestLoggingContext(callContext.getHttpRequest(),
callContext.getContext(),
getRequestRetryCount(callContext.getContext()));
}
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
HttpPipelineCallContext callContext) {
return new HttpResponseLoggingContext(httpResponse, Duration.ofNanos(System.nanoTime() - startNs),
callContext.getContext(),
getRequestRetryCount(callContext.getContext()));
}
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, disableRedactedHeaderLogging);
}
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
/*
 * Logs the request body, rewriting the request's body when needed so it can still be sent.
 *
 * In-memory content (String/ByteBuffer/serializable/byte[]) is logged directly. InputStream
 * content is one-shot, so it is drained to a byte[] and that array REPLACES the request body.
 * Any other (reactive) content is wrapped in a Flux that tees each buffer into a local stream
 * and logs the accumulated bytes in the Flux.using cleanup callback.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
    BinaryData data = request.getBodyAsBinaryData();
    BinaryDataContent content = BinaryDataHelper.getContent(data);
    if (content instanceof StringContent
        || content instanceof ByteBufferContent
        || content instanceof SerializableContent
        || content instanceof ByteArrayContent) {
        // Already buffered in memory; stringifying does not consume the body.
        logBody(logBuilder, logger, contentType, content.toString());
    } else if (content instanceof InputStreamContent) {
        // Buffer the stream and swap the buffered bytes in as the new request body.
        byte[] contentBytes = content.toBytes();
        request.setBody(contentBytes);
        logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
    } else {
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() keeps the downstream consumer's read position intact.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
    }
}
// Attaches the (optionally pretty printed) body text to the log event and emits the entry.
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    String renderedBody = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, renderedBody).log(REQUEST_LOG_MESSAGE);
}
/**
 * Default {@link HttpResponseLogger} used when no custom response logger is configured.
 * Emits a single "HTTP response" entry; when body logging applies, the response is wrapped
 * in LoggingHttpResponse so the entry is deferred until the body is actually consumed.
 */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        // Skip all formatting work when nothing would be emitted at this level.
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer logging until the body is read so the body text can be included.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }

    // Headers require the logger to accept INFORMATIONAL, regardless of the event's level.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, disableRedactedHeaderLogging);
        }
    }

    // Status code, redacted URL, and request duration in milliseconds.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }

    // Content-Length is logged as the raw header string, only when present and non-empty.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }

    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        // Synchronous mirror of logResponse above.
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
/*
 * Produces a log-safe rendering of the URL: the value of any query parameter whose
 * (lower-cased) name is not in the allow list is replaced with the REDACTED placeholder.
 *
 * @param url URL where the request is being sent.
 * @param allowedQueryParameterNames Lower-cased query parameter names that may be logged verbatim.
 * @return The URL string with non-allow-listed query parameter values redacted.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String rawQuery = url.getQuery();
    // No query string means nothing needs redacting.
    if (CoreUtils.isNullOrEmpty(rawQuery)) {
        return url.toString();
    }
    UrlBuilder redactedUrl = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(rawQuery).forEachRemaining(queryParam -> {
        boolean allowed = allowedQueryParameterNames.contains(queryParam.getKey().toLowerCase(Locale.ROOT));
        redactedUrl.addQueryParameter(queryParam.getKey(), allowed ? queryParam.getValue() : REDACTED_PLACEHOLDER);
    });
    return redactedUrl.toString();
}
/*
 * Adds HTTP headers to the log event being built. Allow-listed headers are logged with
 * their values; the NAMES of all remaining headers are collected into a single
 * comma-separated "redactedHeaders" key, unless redacted-header logging is disabled.
 *
 * @param allowedHeaderNames Lower-cased header names that may be logged verbatim.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder Log event being built.
 * @param disableRedactedHeaderLogging When true, non-allow-listed headers are omitted entirely.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder, boolean disableRedactedHeaderLogging) {
    final StringBuilder redactedNames = new StringBuilder();
    HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((lowerCaseName, header) -> {
        if (allowedHeaderNames.contains(lowerCaseName)) {
            logBuilder.addKeyValue(header.getName(), header.getValue());
            return;
        }
        if (disableRedactedHeaderLogging) {
            return;
        }
        if (redactedNames.length() != 0) {
            redactedNames.append(',');
        }
        redactedNames.append(header.getName());
    });
    if (redactedNames.length() != 0) {
        logBuilder.addKeyValue("redactedHeaders", redactedNames.toString());
    }
}
/*
 * Pretty prints the body when pretty printing is enabled and the Content-Type is JSON;
 * otherwise returns the body unchanged. A parse failure is logged as a warning and the
 * original body is returned untouched.
 *
 * @param logger Logger used to log a warning if the body fails to pretty print as JSON.
 * @param prettyPrintBody Whether pretty printing is enabled for this policy.
 * @param contentType Content-Type header value, may be null.
 * @param body Body of the request or response.
 * @return The pretty printed body when it is JSON, otherwise the unmodified body.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    boolean looksLikeJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !looksLikeJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
        return body;
    }
}
/*
 * Parses the Content-Length header into a long. Returns 0 when the header is absent,
 * empty, or not a valid number (the parse failure is logged at INFORMATIONAL).
 *
 * @param logger Logger used to log the Content-Length header being an invalid number.
 * @param headers HTTP headers that are checked for containing Content-Length.
 * @return The parsed Content-Length, or 0 when unavailable or unparseable.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    final String rawContentLength = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(rawContentLength)) {
        return 0;
    }
    try {
        return Long.parseLong(rawContentLength);
    } catch (NumberFormatException e) {
        logger.log(LogLevel.INFORMATIONAL,
            () -> "Could not parse the HTTP header content-length: '" + rawContentLength + "'.", e);
        return 0;
    }
}
/*
 * Determines whether a request or response body should be logged: the body must be
 * non-empty, smaller than the 16KB logging cap, and not an octet stream.
 *
 * @param contentTypeHeader Content-Type header value.
 * @param contentLength Content-Length header represented as a numeric.
 * @return Whether the body should be logged.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    if (contentLength == 0 || contentLength >= MAX_BODY_LOG_SIZE) {
        return false;
    }
    return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
}
/*
 * Reads the request retry count out of the pipeline Context for logging.
 *
 * Returns null — meaning the retry count is not logged — when no value is set or the
 * value does not parse as an integer (the parse failure is logged at INFO).
 */
private static Integer getRequestRetryCount(Context context) {
    Object retryCountData = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (retryCountData == null) {
        return null;
    }
    try {
        return Integer.valueOf(retryCountData.toString());
    } catch (NumberFormatException ex) {
        LOGGER.atInfo()
            .addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCountData)
            .log("Could not parse the request retry count.");
        return null;
    }
}
/*
 * Gets (or lazily creates) the ClientLogger named after the method whose request and
 * response are being logged.
 */
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    // Crude eviction: wipe the whole cache once it grows past the cap.
    if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
        CALLER_METHOD_LOGGER_CACHE.clear();
    }
    return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, name -> new ClientLogger(name));
}
/*
 * Maps a LogLevel onto the matching fluent builder of the logger; any level other than
 * ERROR, WARNING, or INFORMATIONAL falls through to VERBOSE.
 */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    }
    if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    }
    if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    return logger.atVerbose();
}
/**
 * HttpResponse decorator that defers emitting the "HTTP response" log entry until the
 * body is consumed, so the logged entry can include the buffered body text.
 */
private static final class LoggingHttpResponse extends HttpResponse {
    private final HttpResponse actualResponse;
    // Pre-populated log event (status, URL, headers) waiting for the body.
    private final LoggingEventBuilder logBuilder;
    private final int contentLength;
    private final ClientLogger logger;
    private final boolean prettyPrintBody;
    private final String contentTypeHeader;

    private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
        ClientLogger logger, int contentLength, String contentTypeHeader,
        boolean prettyPrintBody) {
        super(actualResponse.getRequest());
        this.actualResponse = actualResponse;
        this.logBuilder = logBuilder;
        this.logger = logger;
        this.contentLength = contentLength;
        this.contentTypeHeader = contentTypeHeader;
        this.prettyPrintBody = prettyPrintBody;
    }

    @Override
    public int getStatusCode() {
        return actualResponse.getStatusCode();
    }

    @Override
    @Deprecated
    public String getHeaderValue(String name) {
        return actualResponse.getHeaderValue(name);
    }

    @Override
    public String getHeaderValue(HttpHeaderName headerName) {
        return actualResponse.getHeaderValue(headerName);
    }

    @Override
    public HttpHeaders getHeaders() {
        return actualResponse.getHeaders();
    }

    @Override
    public Flux<ByteBuffer> getBody() {
        // Tee every buffer into `stream` and log the accumulated UTF-8 text when the
        // Flux terminates (Flux.using cleanup callback).
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        return Flux.using(() -> stream, s -> actualResponse.getBody()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() keeps the downstream consumer's read position intact.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> doLog(s.toString(StandardCharsets.UTF_8)));
    }

    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
    }

    @Override
    public Mono<String> getBodyAsString() {
        // NOTE(review): String::new decodes with the platform default charset while getBody()
        // logs the bytes as UTF-8 — confirm this mismatch is intentional.
        return getBodyAsByteArray().map(String::new);
    }

    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
    }

    @Override
    public BinaryData getBodyAsBinaryData() {
        BinaryData content = actualResponse.getBodyAsBinaryData();
        // BinaryData is already materialized, so the entry is logged eagerly here.
        doLog(content.toString());
        return content;
    }

    @Override
    public void close() {
        actualResponse.close();
    }

    // Emits the deferred "HTTP response" entry with the captured body attached.
    private void doLog(String body) {
        logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
            prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
            .log(RESPONSE_LOG_MESSAGE);
    }
}
} |
this represents the current state and I don't want to change the default behavior | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.enableRedactedHeaderLogging = true;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.enableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingEnabled();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | this.enableRedactedHeaderLogging = true; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.disableRedactedHeaderLogging = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.disableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingDisabled();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final boolean enableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
.then(next.process())
.flatMap(response -> responseLogger.logResponse(logger,
getResponseLoggingOptions(response, startNs, context)))
.doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.processSync();
}
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
try {
HttpResponse response = next.processSync();
if (response != null) {
response = responseLogger.logResponseSync(
logger, getResponseLoggingOptions(response, startNs, context));
}
return response;
} catch (RuntimeException e) {
createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
.log("HTTP FAILED", e);
throw e;
}
}
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
LoggingEventBuilder log = logger.atLevel(level);
if (LOGGER.canLogAtLevel(level) && request != null) {
if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
if (clientRequestId != null) {
log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
}
}
if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
String traceparent = request.getHeaders().getValue(TRACEPARENT);
if (traceparent != null) {
log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
}
}
}
return log;
}
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
return new HttpRequestLoggingContext(callContext.getHttpRequest(),
callContext.getContext(),
getRequestRetryCount(callContext.getContext()));
}
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
HttpPipelineCallContext callContext) {
return new HttpResponseLoggingContext(httpResponse, Duration.ofNanos(System.nanoTime() - startNs),
callContext.getContext(),
getRequestRetryCount(callContext.getContext()));
}
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, enableRedactedHeaderLogging);
}
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data))
.log(REQUEST_LOG_MESSAGE);
}
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
}
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, enableRedactedHeaderLogging);
}
}
private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
}
}
private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody);
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
String query = url.getQuery();
if (CoreUtils.isNullOrEmpty(query)) {
return url.toString();
}
UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false);
CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
if (allowedQueryParameterNames.contains(queryParam.getKey().toLowerCase(Locale.ROOT))) {
urlBuilder.addQueryParameter(queryParam.getKey(), queryParam.getValue());
} else {
urlBuilder.addQueryParameter(queryParam.getKey(), REDACTED_PLACEHOLDER);
}
});
return urlBuilder.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
* @param disableRedactedHeaderLogging Flag indicating if redacted headers should be logged.
*/
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
LoggingEventBuilder logBuilder, boolean enableRedactedHeaderLogging) {
HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((key, value) -> {
boolean isAllowed = allowedHeaderNames.contains(key);
if (isAllowed || enableRedactedHeaderLogging) {
logBuilder.addKeyValue(value.getName(), isAllowed ? value.getValue() : REDACTED_PLACEHOLDER);
}
});
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
String body) {
String result = body;
if (prettyPrintBody && contentType != null
&& (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
try {
final Object deserialized = PRETTY_PRINTER.readTree(body);
result = PRETTY_PRINTER.writeValueAsString(deserialized);
} catch (Exception e) {
logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
}
}
return result;
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
long contentLength = 0;
String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException e) {
logger.log(LogLevel.INFORMATIONAL,
() -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e);
}
return contentLength;
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)
&& contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
private static Integer getRequestRetryCount(Context context) {
Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
if (rawRetryCount == null) {
return null;
}
try {
return Integer.valueOf(rawRetryCount.toString());
} catch (NumberFormatException ex) {
LOGGER.atInfo()
.addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
.log("Could not parse the request retry count.");
return null;
}
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
private static ClientLogger getOrCreateMethodLogger(String methodName) {
if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
CALLER_METHOD_LOGGER_CACHE.clear();
}
return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
switch (logLevel) {
case ERROR:
return logger.atError();
case WARNING:
return logger.atWarning();
case INFORMATIONAL:
return logger.atInfo();
case VERBOSE:
default:
return logger.atVerbose();
}
}
private static final class LoggingHttpResponse extends HttpResponse {
private final HttpResponse actualResponse;
private final LoggingEventBuilder logBuilder;
private final int contentLength;
private final ClientLogger logger;
private final boolean prettyPrintBody;
private final String contentTypeHeader;
private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
ClientLogger logger, int contentLength, String contentTypeHeader,
boolean prettyPrintBody) {
super(actualResponse.getRequest());
this.actualResponse = actualResponse;
this.logBuilder = logBuilder;
this.logger = logger;
this.contentLength = contentLength;
this.contentTypeHeader = contentTypeHeader;
this.prettyPrintBody = prettyPrintBody;
}
@Override
public int getStatusCode() {
return actualResponse.getStatusCode();
}
@Override
@Deprecated
public String getHeaderValue(String name) {
return actualResponse.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return actualResponse.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return actualResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
return Flux.using(() -> stream, s -> actualResponse.getBody()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> doLog(s.toString(StandardCharsets.UTF_8)));
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(String::new);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BinaryData getBodyAsBinaryData() {
BinaryData content = actualResponse.getBodyAsBinaryData();
doLog(content.toString());
return content;
}
@Override
public void close() {
actualResponse.close();
}
private void doLog(String body) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
.log(RESPONSE_LOG_MESSAGE);
}
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
// Shared mapper used only for pretty-printing JSON bodies; cached as it is expensive to build.
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
// Bodies at or above 16KB are never logged (see shouldBodyBeLogged).
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
// Per-caller-method logger cache; wiped wholesale when it exceeds this cap.
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
// Lower-cased allow lists; lookups elsewhere rely on this normalization.
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
// When true, names of redacted headers are omitted from log entries entirely.
private final boolean disableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
 * Key for {@link Context} to pass request retry count metadata for logging.
 */
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
 * Creates an HttpLoggingPolicy with the given log configurations.
 *
 * @param httpLogOptions The HTTP logging configuration options.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
// Logging disabled: pass through with zero per-request overhead.
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
// One cached ClientLogger per calling method so entries carry the caller's name.
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
// Log the outgoing request, forward it, then log the response; duration is measured
// from before the request log. Failures get a minimal "HTTP FAILED" entry with
// correlation headers only.
return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
.then(next.process())
.flatMap(response -> responseLogger.logResponse(logger,
getResponseLoggingOptions(response, startNs, context)))
.doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging fully disabled, skip logger lookup and timing.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    // One cached ClientLogger per calling method so entries carry the caller's name.
    String callerMethod = (String) context.getData("caller-method").orElse("");
    ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    long start = System.nanoTime();
    requestLogger.logRequestSync(methodLogger, getRequestLoggingOptions(context));
    try {
        HttpResponse httpResponse = next.processSync();
        return (httpResponse == null)
            ? null
            : responseLogger.logResponseSync(methodLogger, getResponseLoggingOptions(httpResponse, start, context));
    } catch (RuntimeException ex) {
        // Emit a minimal failure entry (correlation headers only), then rethrow untouched.
        createBasicLoggingContext(methodLogger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", ex);
        throw ex;
    }
}
/*
 * Builds a minimal log event for failure paths, attaching only correlation headers
 * (client request id and traceparent) and only when those headers are allow-listed.
 */
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
LoggingEventBuilder log = logger.atLevel(level);
// NOTE(review): this gates on the static LOGGER's level rather than the passed-in
// 'logger' — possibly deliberate (shared env config), but confirm it isn't a typo.
if (LOGGER.canLogAtLevel(level) && request != null) {
if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
if (clientRequestId != null) {
log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
}
}
if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
String traceparent = request.getHeaders().getValue(TRACEPARENT);
if (traceparent != null) {
log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
}
}
}
return log;
}
// Bundles the request, its Context, and the parsed retry count for the request logger.
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    Context azContext = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), azContext,
        getRequestRetryCount(azContext));
}
// Bundles the response, elapsed time since startNs, Context, and retry count for the
// response logger.
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    Context azContext = callContext.getContext();
    Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, azContext, getRequestRetryCount(azContext));
}
/*
 * Default request logger: emits one structured "HTTP request" event per request,
 * honoring the configured detail level (URL, headers, body).
 */
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
// Logging is performed synchronously before subscription completes.
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
// Shared implementation for both async and sync paths.
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
// Headers are only logged at INFORMATIONAL or better, regardless of detail level.
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, disableRedactedHeaderLogging);
}
// No body: log immediately with a zero content length.
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
// logBody emits the event itself (possibly asynchronously for reactive bodies).
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
/*
 * Logs the request body, adapting to how the body content is held. May replace the
 * request's body so it remains replayable after being consumed for logging.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
// Replayable in-memory content can be stringified and logged immediately.
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
// Streams are single-shot: buffer fully, then swap the request body for the
// buffered bytes so the request can still be sent (and retried).
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
// Reactive content: tee each emitted buffer into a side stream and log once the
// body terminates. duplicate() keeps the transport's read position untouched.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
// Attaches the (optionally pretty-printed) payload to the event and emits it.
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    String printableBody = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, printableBody).log(REQUEST_LOG_MESSAGE);
}
/*
 * Default response logger: emits one structured "HTTP response" event, honoring the
 * configured detail level. When the body should be logged, the response is wrapped
 * in LoggingHttpResponse so the body entry is emitted once the caller consumes it.
 */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
// Defer: the wrapper logs the event (including body) on body consumption.
return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
}
// Headers are only logged at INFORMATIONAL or better, regardless of detail level.
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, disableRedactedHeaderLogging);
}
}
// Adds status code, redacted URL, and round-trip duration when URL logging is on.
private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
}
}
// Logs the raw Content-Length header value when present and non-empty.
private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody);
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
}
/*
 * Generates the redacted URL for logging.
 *
 * @param url URL where the request is being sent.
 * @param allowedQueryParameterNames Lower-cased query parameter names that may appear in clear text.
 * @return A URL with disallowed query parameter values replaced by the redaction placeholder.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String rawQuery = url.getQuery();
    if (CoreUtils.isNullOrEmpty(rawQuery)) {
        // No query string, nothing to redact.
        return url.toString();
    }
    // Rebuild the URL, substituting the value of every parameter not on the allow list.
    UrlBuilder redactedBuilder = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(rawQuery).forEachRemaining(pair -> {
        String name = pair.getKey();
        boolean allowed = allowedQueryParameterNames.contains(name.toLowerCase(Locale.ROOT));
        redactedBuilder.addQueryParameter(name, allowed ? pair.getValue() : REDACTED_PLACEHOLDER);
    });
    return redactedBuilder.toString();
}
/*
 * Adds HTTP headers into the log event being built.
 *
 * Allow-listed headers are logged with their real values; all other header names are
 * collected into a single comma-separated "redactedHeaders" value, unless redacted
 * header logging is disabled entirely.
 *
 * @param allowedHeaderNames Lower-cased header names that may be logged in clear text.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder Log event being populated.
 * @param disableRedactedHeaderLogging Whether redacted header names are omitted from the event.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
LoggingEventBuilder logBuilder, boolean disableRedactedHeaderLogging) {
final StringBuilder redactedHeaders = new StringBuilder();
// NOTE(review): assumes the raw map's keys are lower-cased so they match the
// normalized allow list — confirm in HttpHeadersAccessHelper.
HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((key, value) -> {
if (allowedHeaderNames.contains(key)) {
logBuilder.addKeyValue(value.getName(), value.getValue());
} else if (!disableRedactedHeaderLogging) {
if (redactedHeaders.length() > 0) {
redactedHeaders.append(',');
}
redactedHeaders.append(value.getName());
}
});
if (redactedHeaders.length() > 0) {
logBuilder.addKeyValue("redactedHeaders", redactedHeaders.toString());
}
}
/*
 * Determines and attempts to pretty print the body if it is JSON.
 *
 * <p>The body is pretty printed only when pretty printing is enabled and the
 * Content-Type indicates JSON; on any parse/serialize failure the original body is
 * returned unchanged.</p>
 *
 * @param logger Logger used to log a warning if the body fails to pretty print as JSON.
 * @param prettyPrintBody Whether pretty printing is enabled.
 * @param contentType Content-Type header.
 * @param body Body of the request or response.
 * @return The body pretty printed if it is JSON, otherwise the unmodified body.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    boolean looksLikeJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !looksLikeJson) {
        return body;
    }
    try {
        // Round-trip through the tree model to re-serialize with indentation.
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Malformed JSON: fall back to the raw body instead of failing the pipeline.
        logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
        return body;
    }
}
/*
 * Attempts to retrieve and parse the Content-Length header into a numeric representation.
 *
 * @param logger Logger used to log a warning if the Content-Length header is an invalid number.
 * @param headers HTTP headers that are checked for containing Content-Length.
 * @return The parsed Content-Length, or 0 when the header is missing, empty, or unparseable.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String rawLength = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(rawLength)) {
        return 0;
    }
    try {
        return Long.parseLong(rawLength);
    } catch (NumberFormatException e) {
        // Unparseable length is informational only — body logging just treats it as 0.
        logger.log(LogLevel.INFORMATIONAL,
            () -> "Could not parse the HTTP header content-length: '" + rawLength + "'.", e);
        return 0;
    }
}
/*
 * Determines if the request or response body should be logged.
 *
 * <p>The body is logged when the Content-Type is not "application/octet-stream" and
 * the body is non-empty and smaller than 16KB.</p>
 *
 * @param contentTypeHeader Content-Type header value.
 * @param contentLength Content-Length header represented as a numeric.
 * @return A flag indicating if the request or response body should be logged.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    boolean isOctetStream = ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
    boolean sizeIsLoggable = contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
    return !isOctetStream && sizeIsLoggable;
}
/*
 * Gets the request retry count to include in logging.
 *
 * Returns null when the context carries no value or the value is not a valid
 * integer, in which case the retry count is omitted from the log entry.
 */
private static Integer getRequestRetryCount(Context context) {
    Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawRetryCount == null) {
        return null;
    }
    String retryCountText = rawRetryCount.toString();
    try {
        return Integer.valueOf(retryCountText);
    } catch (NumberFormatException ex) {
        LOGGER.atInfo()
            .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
            .log("Could not parse the request retry count.");
        return null;
    }
}
/*
 * Returns the cached ClientLogger for the calling method, creating and caching it
 * on first use.
 */
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    // Unbounded-growth guard: once the cache passes the cap, wipe it entirely
    // instead of tracking eviction order — loggers are cheap to recreate.
    if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
        CALLER_METHOD_LOGGER_CACHE.clear();
    }
    ClientLogger cached = CALLER_METHOD_LOGGER_CACHE.get(methodName);
    return (cached != null) ? cached : CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
/*
 * Maps the computed severity onto the logger's fluent event builders; any level
 * other than ERROR/WARNING/INFORMATIONAL falls back to verbose.
 */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    } else if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    } else if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    return logger.atVerbose();
}
/*
 * Response decorator that defers body logging until the caller actually consumes
 * the body, so the payload is read only once. All other accessors delegate to the
 * wrapped response.
 */
private static final class LoggingHttpResponse extends HttpResponse {
private final HttpResponse actualResponse;
// Pre-populated log event; the body key is appended in doLog() once known.
private final LoggingEventBuilder logBuilder;
private final int contentLength;
private final ClientLogger logger;
private final boolean prettyPrintBody;
private final String contentTypeHeader;
private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
ClientLogger logger, int contentLength, String contentTypeHeader,
boolean prettyPrintBody) {
super(actualResponse.getRequest());
this.actualResponse = actualResponse;
this.logBuilder = logBuilder;
this.logger = logger;
this.contentLength = contentLength;
this.contentTypeHeader = contentTypeHeader;
this.prettyPrintBody = prettyPrintBody;
}
@Override
public int getStatusCode() {
return actualResponse.getStatusCode();
}
@Override
@Deprecated
public String getHeaderValue(String name) {
return actualResponse.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return actualResponse.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return actualResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
// Tee each buffer into a side stream (duplicate() preserves the caller's read
// position); the log entry is emitted when the Flux terminates.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
return Flux.using(() -> stream, s -> actualResponse.getBody()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> doLog(s.toString(StandardCharsets.UTF_8)));
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
// Routes through getBody() so the logging tee above still applies.
return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
}
@Override
public Mono<String> getBodyAsString() {
// NOTE(review): String::new uses the platform default charset — confirm intended.
return getBodyAsByteArray().map(String::new);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BinaryData getBodyAsBinaryData() {
// Logged eagerly here, since the BinaryData is handed back whole.
BinaryData content = actualResponse.getBodyAsBinaryData();
doLog(content.toString());
return content;
}
@Override
public void close() {
actualResponse.close();
}
// Appends the (optionally pretty-printed) body to the prepared event and emits it.
private void doLog(String body) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
.log(RESPONSE_LOG_MESSAGE);
}
}
} |
OK, I changed the default to false, i.e. we have a somewhat breaking change: instead of logging "foo":"REDACTED","bar":"REDACTED", redacted headers are no longer logged by default, and a flag on the options re-enables them as "redactedHeaders":"foo,bar". Since this alone is somewhat breaking, I'd rather change the default to not logging them. Let me know what you think about this breaking-ish change; I'm fine either way. | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
// No options supplied: environment-driven detail level, built-in allow lists,
// and redacted-header logging enabled by default.
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet())
.stream() == null ? null : HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
} | this.enableRedactedHeaderLogging = true; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
// No options supplied: environment-driven detail level, built-in allow lists,
// and redacted-header logging enabled (disable flag off) by default.
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.disableRedactedHeaderLogging = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
// Lower-case the allow lists once up front so later lookups are case-insensitive.
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.disableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingDisabled();
// Custom request/response loggers from options take precedence over the defaults.
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
// Shared mapper used only for pretty-printing JSON bodies; cached as it is expensive to build.
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
// Bodies at or above 16KB are never logged (see shouldBodyBeLogged).
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
// Per-caller-method logger cache; wiped wholesale when it exceeds this cap.
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
// Lower-cased allow lists; lookups elsewhere rely on this normalization.
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
// When false, names of redacted headers are omitted from log entries entirely.
private final boolean enableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
 * Key for {@link Context} to pass request retry count metadata for logging.
 */
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
 * Creates an HttpLoggingPolicy with the given log configurations.
 *
 * @param httpLogOptions The HTTP logging configuration options.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
// Logging disabled: pass through with zero per-request overhead.
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
// One cached ClientLogger per calling method so entries carry the caller's name.
final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
// Log the request, forward it, then log the response; failures get a minimal
// "HTTP FAILED" entry with correlation headers only.
return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
.then(next.process())
.flatMap(response -> responseLogger.logResponse(logger,
getResponseLoggingOptions(response, startNs, context)))
.doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging fully disabled, skip logger lookup and timing.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    // One cached ClientLogger per calling method so entries carry the caller's name.
    String callerMethod = (String) context.getData("caller-method").orElse("");
    ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    long start = System.nanoTime();
    requestLogger.logRequestSync(methodLogger, getRequestLoggingOptions(context));
    try {
        HttpResponse httpResponse = next.processSync();
        return (httpResponse == null)
            ? null
            : responseLogger.logResponseSync(methodLogger, getResponseLoggingOptions(httpResponse, start, context));
    } catch (RuntimeException ex) {
        // Emit a minimal failure entry (correlation headers only), then rethrow untouched.
        createBasicLoggingContext(methodLogger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", ex);
        throw ex;
    }
}
/*
 * Builds a minimal log event for failure paths, attaching only correlation headers
 * (client request id and traceparent) and only when those headers are allow-listed.
 */
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
LoggingEventBuilder log = logger.atLevel(level);
// NOTE(review): gates on the static LOGGER's level rather than the passed-in
// 'logger' — possibly deliberate, but confirm it isn't a typo.
if (LOGGER.canLogAtLevel(level) && request != null) {
if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
if (clientRequestId != null) {
log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
}
}
if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
String traceparent = request.getHeaders().getValue(TRACEPARENT);
if (traceparent != null) {
log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
}
}
}
return log;
}
// Bundles the request, its Context, and the parsed retry count for the request logger.
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    Context azContext = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), azContext,
        getRequestRetryCount(azContext));
}
// Bundles the response, elapsed time since startNs, Context, and retry count for the
// response logger.
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    Context azContext = callContext.getContext();
    Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, azContext, getRequestRetryCount(azContext));
}
/*
 * Default request logger: emits one structured "HTTP request" event per request,
 * honoring the configured detail level (URL, headers, body).
 */
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
// Logging is performed synchronously before subscription completes.
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
// Shared implementation for both async and sync paths.
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
// Headers are only logged at INFORMATIONAL or better, regardless of detail level.
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, enableRedactedHeaderLogging);
}
// No body: log immediately with a zero content length.
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
// logBody emits the event itself (possibly asynchronously for reactive bodies).
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
/*
 * Logs the request body, adapting to how the body content is held. May replace the
 * request's body so it remains replayable after being consumed for logging.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
// Replayable in-memory content can be stringified and logged immediately.
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
// Streams are single-shot: buffer fully, then swap the request body for the
// buffered bytes so the request can still be sent (and retried).
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
// Reactive content: tee each emitted buffer into a side stream and log once the
// body terminates. duplicate() keeps the transport's read position untouched.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
// Attaches the (optionally pretty-printed) payload to the event and emits it.
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    String printableBody = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, printableBody).log(REQUEST_LOG_MESSAGE);
}
/*
 * Default response logger: emits one structured "HTTP response" event, honoring the
 * configured detail level. When the body should be logged, the response is wrapped
 * in LoggingHttpResponse so the body entry is emitted once the caller consumes it.
 */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
// Defer: the wrapper logs the event (including body) on body consumption.
return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
}
// Headers are only logged at INFORMATIONAL or better, regardless of detail level.
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, enableRedactedHeaderLogging);
}
}
// Adds status code, redacted URL, and round-trip duration when URL logging is on.
private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
}
}
// Logs the raw Content-Length header value when present and non-empty.
private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody);
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
/*
 * Builds a loggable form of the URL in which every query parameter not on the
 * allow list has its value replaced with the redaction placeholder.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String query = url.getQuery();
    // No query string: nothing to redact.
    if (CoreUtils.isNullOrEmpty(query)) {
        return url.toString();
    }
    UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
        String name = queryParam.getKey();
        boolean allowed = allowedQueryParameterNames.contains(name.toLowerCase(Locale.ROOT));
        urlBuilder.addQueryParameter(name, allowed ? queryParam.getValue() : REDACTED_PLACEHOLDER);
    });
    return urlBuilder.toString();
}
/*
 * Adds HTTP headers into the log message being built.
 *
 * @param allowedHeaderNames Lower-cased names of headers that may be logged with their values.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder Logging event builder that is accumulating the log message.
 * @param enableRedactedHeaderLogging Flag indicating whether disallowed headers are still logged with a redacted value.
 */
/*
 * Adds HTTP headers to the log builder. Allowed headers keep their value; other
 * headers are logged with a redacted value only when redacted-header logging is enabled.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder, boolean enableRedactedHeaderLogging) {
    HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((normalizedName, header) -> {
        if (allowedHeaderNames.contains(normalizedName)) {
            logBuilder.addKeyValue(header.getName(), header.getValue());
        } else if (enableRedactedHeaderLogging) {
            logBuilder.addKeyValue(header.getName(), REDACTED_PLACEHOLDER);
        }
    });
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
/*
 * Pretty prints the body when pretty printing is requested and the Content-Type is JSON;
 * otherwise (or if parsing fails) returns the body unchanged.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    boolean isJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !isJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Malformed JSON: log a warning and fall back to the raw body.
        logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
        return body;
    }
}
/*
 * Attempts to retrieve and parse the Content-Length header into a numeric representation.
 *
 * @param logger Logger used to log a warning if the Content-Length header is an invalid number.
 * @param headers HTTP headers that are checked for containing Content-Length.
 * @return The parsed Content-Length value, or 0 if the header is missing or unparseable.
 */
/*
 * Parses the Content-Length header into a long. Returns 0 when the header is
 * missing, empty, or not a valid number (the parse failure is logged at INFO).
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        logger.log(LogLevel.INFORMATIONAL,
            () -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e);
        return 0;
    }
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
/*
 * A body is logged only when it is not octet-stream content, is non-empty, and is
 * smaller than MAX_BODY_LOG_SIZE.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    boolean isOctetStream = ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
    boolean sizeIsLoggable = contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
    return !isOctetStream && sizeIsLoggable;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
/*
 * Reads the retry count stored in the pipeline context. Returns null (meaning
 * "don't log a try count") when the value is absent or not a valid integer.
 */
private static Integer getRequestRetryCount(Context context) {
    return context.getData(RETRY_COUNT_CONTEXT)
        .map(rawRetryCount -> {
            try {
                return Integer.valueOf(rawRetryCount.toString());
            } catch (NumberFormatException ex) {
                LOGGER.atInfo()
                    .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
                    .log("Could not parse the request retry count.");
                return (Integer) null;
            }
        })
        .orElse(null);
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
/*
 * Gets (or lazily creates) the per-caller-method logger. The cache is bounded
 * crudely: once it grows past the limit it is wiped wholesale and rebuilt on demand.
 */
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
        CALLER_METHOD_LOGGER_CACHE.clear();
    }
    return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, name -> new ClientLogger(name));
}
/*
 * Maps a LogLevel to the matching logging event builder; anything that is not
 * ERROR/WARNING/INFORMATIONAL (including VERBOSE) logs at verbose.
 */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    }
    if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    }
    if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    return logger.atVerbose();
}
/**
 * HttpResponse decorator that captures the body as it is consumed and emits the
 * deferred response log line (body included) once the content is available.
 */
private static final class LoggingHttpResponse extends HttpResponse {
    private final HttpResponse actualResponse;
    private final LoggingEventBuilder logBuilder;
    private final int contentLength; // used to presize the capture buffer
    private final ClientLogger logger;
    private final boolean prettyPrintBody;
    private final String contentTypeHeader;

    private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
        ClientLogger logger, int contentLength, String contentTypeHeader,
        boolean prettyPrintBody) {
        super(actualResponse.getRequest());
        this.actualResponse = actualResponse;
        this.logBuilder = logBuilder;
        this.logger = logger;
        this.contentLength = contentLength;
        this.contentTypeHeader = contentTypeHeader;
        this.prettyPrintBody = prettyPrintBody;
    }

    @Override
    public int getStatusCode() {
        return actualResponse.getStatusCode();
    }

    @Override
    @Deprecated
    public String getHeaderValue(String name) {
        return actualResponse.getHeaderValue(name);
    }

    @Override
    public String getHeaderValue(HttpHeaderName headerName) {
        return actualResponse.getHeaderValue(headerName);
    }

    @Override
    public HttpHeaders getHeaders() {
        return actualResponse.getHeaders();
    }

    @Override
    public Flux<ByteBuffer> getBody() {
        // Tee each buffer into a side stream; the accumulated body is logged when
        // the Flux terminates (Flux.using invokes the cleanup callback).
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        return Flux.using(() -> stream, s -> actualResponse.getBody()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() so the downstream consumer's read position is untouched.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> doLog(s.toString(StandardCharsets.UTF_8)));
    }

    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
    }

    @Override
    public Mono<String> getBodyAsString() {
        // NOTE(review): String::new decodes with the platform default charset while the
        // capture in getBody() uses UTF-8 — confirm this divergence is intended.
        return getBodyAsByteArray().map(String::new);
    }

    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
    }

    @Override
    public BinaryData getBodyAsBinaryData() {
        BinaryData content = actualResponse.getBodyAsBinaryData();
        doLog(content.toString());
        return content;
    }

    @Override
    public void close() {
        actualResponse.close();
    }

    // Emits the deferred response log with the captured body (pretty printed when configured).
    private void doLog(String body) {
        logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
            prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
            .log(RESPONSE_LOG_MESSAGE);
    }
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final boolean disableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Fast path: logging disabled entirely.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    // One cached logger per calling method so log lines carry the caller's name.
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    // Log the request, forward the call, then log the response with the elapsed time.
    return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
        .then(next.process())
        .flatMap(response -> responseLogger.logResponse(logger,
            getResponseLoggingOptions(response, startNs, context)))
        .doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging disabled entirely.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    // One cached logger per calling method so log lines carry the caller's name.
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        if (response != null) {
            response = responseLogger.logResponseSync(
                logger, getResponseLoggingOptions(response, startNs, context));
        }
        return response;
    } catch (RuntimeException e) {
        // Log the failure with minimal correlation info, then rethrow unchanged.
        createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", e);
        throw e;
    }
}
/*
 * Builds a minimal logging context for failure logs: only the client request id and
 * traceparent headers are attached, and only when they are on the header allow list.
 */
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
    LoggingEventBuilder log = logger.atLevel(level);
    if (!LOGGER.canLogAtLevel(level) || request == null) {
        return log;
    }
    if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
        String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
        if (clientRequestId != null) {
            log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
        }
    }
    if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
        String traceparent = request.getHeaders().getValue(TRACEPARENT);
        if (traceparent != null) {
            log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
        }
    }
    return log;
}
/*
 * Packages the request, context, and (optional) retry count for the request logger.
 */
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    Integer retryCount = getRequestRetryCount(callContext.getContext());
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), callContext.getContext(), retryCount);
}
/*
 * Packages the response, elapsed time (measured from startNs), context, and
 * (optional) retry count for the response logger.
 */
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    Integer retryCount = getRequestRetryCount(callContext.getContext());
    return new HttpResponseLoggingContext(httpResponse, elapsed, callContext.getContext(), retryCount);
}
/**
 * Default request logger: emits method/URL, try count, headers, and optionally the body,
 * depending on {@code httpLogDetailLevel}.
 */
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
    @Override
    public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
        return Mono.empty();
    }

    @Override
    public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        // Same as logRequest without the reactive wrapper.
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
    }

    // Shared implementation for the async and sync paths.
    private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final HttpRequest request = loggingOptions.getHttpRequest();
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
            Integer retryCount = loggingOptions.getTryCount();
            if (retryCount != null) {
                logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
            }
        }
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, disableRedactedHeaderLogging);
        }
        // No body: log a zero content length and finish.
        if (request.getBody() == null) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
                .log(REQUEST_LOG_MESSAGE);
            return;
        }
        String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, request.getHeaders());
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
        // Body logging may defer the actual log emission (see logBody overloads).
        if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
            logBody(request, (int) contentLength, logBuilder, logger, contentType);
            return;
        }
        logBuilder.log(REQUEST_LOG_MESSAGE);
    }
}
/*
 * Logs the request body, replaying or re-wrapping the content so the request can
 * still be sent after its body has been read for logging.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
    BinaryData data = request.getBodyAsBinaryData();
    BinaryDataContent content = BinaryDataHelper.getContent(data);
    if (content instanceof StringContent
        || content instanceof ByteBufferContent
        || content instanceof SerializableContent
        || content instanceof ByteArrayContent) {
        // Replayable in-memory content: safe to stringify directly.
        logBody(logBuilder, logger, contentType, content.toString());
    } else if (content instanceof InputStreamContent) {
        // An InputStream can only be consumed once: buffer it fully and replace the
        // request body with the buffered bytes before logging.
        byte[] contentBytes = content.toBytes();
        request.setBody(contentBytes);
        logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
    } else {
        // Streaming content: tee the Flux into a side buffer and emit the log line
        // once the stream completes (Flux.using cleanup callback).
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() keeps the original buffer's position intact for the send path.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
    }
}
/**
 * Attaches the request body (pretty printed when configured) to the log builder and
 * emits the request log line.
 */
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    String bodyToLog = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, bodyToLog).log(REQUEST_LOG_MESSAGE);
}
/**
 * Default response logger: emits status/URL/duration, headers, and optionally the body,
 * depending on {@code httpLogDetailLevel}.
 */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        // Skip all work when the logger won't emit at this level.
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer the log line: the wrapper logs (body included) once the body is consumed.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }

    // Adds response headers to the log message when header logging is enabled.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, disableRedactedHeaderLogging);
        }
    }

    // Adds status code, redacted URL, and call duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }

    // Adds the raw Content-Length header value when present.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }

    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        // Synchronous twin of logResponse: same checks, returns the (possibly wrapped) response directly.
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
/*
 * Builds a loggable form of the URL in which every query parameter not on the
 * allow list has its value replaced with the redaction placeholder.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String query = url.getQuery();
    // No query string: nothing to redact.
    if (CoreUtils.isNullOrEmpty(query)) {
        return url.toString();
    }
    UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
        if (!allowedQueryParameterNames.contains(queryParam.getKey().toLowerCase(Locale.ROOT))) {
            urlBuilder.addQueryParameter(queryParam.getKey(), REDACTED_PLACEHOLDER);
        } else {
            urlBuilder.addQueryParameter(queryParam.getKey(), queryParam.getValue());
        }
    });
    return urlBuilder.toString();
}
/*
 * Adds HTTP headers into the log message being built.
 *
 * @param allowedHeaderNames Lower-cased names of headers that may be logged with their values.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder Logging event builder that is accumulating the log message.
 * @param disableRedactedHeaderLogging Flag indicating whether the names of redacted headers are omitted from the log.
 */
/*
 * Adds HTTP headers to the log builder. Allowed headers keep their value; the names of
 * all other headers are collected into a single comma-separated "redactedHeaders" value,
 * unless redacted-header logging is disabled.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder, boolean disableRedactedHeaderLogging) {
    final StringBuilder redactedHeaders = new StringBuilder();
    HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((normalizedName, header) -> {
        if (allowedHeaderNames.contains(normalizedName)) {
            logBuilder.addKeyValue(header.getName(), header.getValue());
            return;
        }
        if (disableRedactedHeaderLogging) {
            return;
        }
        if (redactedHeaders.length() > 0) {
            redactedHeaders.append(',');
        }
        redactedHeaders.append(header.getName());
    });
    if (redactedHeaders.length() > 0) {
        logBuilder.addKeyValue("redactedHeaders", redactedHeaders.toString());
    }
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
/*
 * Pretty prints the body when pretty printing is requested and the Content-Type is JSON;
 * otherwise (or if parsing fails) returns the body unchanged.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    if (!prettyPrintBody) {
        return body;
    }
    if (contentType == null
        || !(contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Malformed JSON: log a warning and fall back to the raw body.
        logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
        return body;
    }
}
/*
 * Attempts to retrieve and parse the Content-Length header into a numeric representation.
 *
 * @param logger Logger used to log a warning if the Content-Length header is an invalid number.
 * @param headers HTTP headers that are checked for containing Content-Length.
 * @return The parsed Content-Length value, or 0 if the header is missing or unparseable.
 */
/*
 * Parses the Content-Length header into a long. Returns 0 when the header is
 * missing, empty, or not a valid number (the parse failure is logged at INFO).
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String rawContentLength = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    long parsed = 0;
    if (!CoreUtils.isNullOrEmpty(rawContentLength)) {
        try {
            parsed = Long.parseLong(rawContentLength);
        } catch (NumberFormatException e) {
            logger.log(LogLevel.INFORMATIONAL,
                () -> "Could not parse the HTTP header content-length: '" + rawContentLength + "'.", e);
        }
    }
    return parsed;
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
/*
 * A body is logged only when it is not octet-stream content, is non-empty, and is
 * smaller than MAX_BODY_LOG_SIZE.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    if (ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)) {
        return false;
    }
    return contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
/*
 * Reads the retry count stored in the pipeline context. Returns null (meaning
 * "don't log a try count") when the value is absent or not a valid integer.
 */
private static Integer getRequestRetryCount(Context context) {
    Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawRetryCount == null) {
        return null;
    }
    try {
        // parseInt + autoboxing is equivalent to Integer.valueOf here.
        return Integer.parseInt(rawRetryCount.toString());
    } catch (NumberFormatException ex) {
        LOGGER.atInfo()
            .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
            .log("Could not parse the request retry count.");
        return null;
    }
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
/*
 * Gets (or lazily creates) the per-caller-method logger. The cache is bounded
 * crudely: once it grows past the limit it is wiped wholesale and rebuilt on demand.
 */
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    Map<String, ClientLogger> cache = CALLER_METHOD_LOGGER_CACHE;
    if (cache.size() > LOGGER_CACHE_MAX_SIZE) {
        cache.clear();
    }
    return cache.computeIfAbsent(methodName, ClientLogger::new);
}
/*
 * Maps a LogLevel to the matching logging event builder.
 */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    switch (logLevel) {
        case ERROR:
            return logger.atError();
        case WARNING:
            return logger.atWarning();
        case INFORMATIONAL:
            return logger.atInfo();
        default:
            // VERBOSE and anything unrecognized log at verbose.
            return logger.atVerbose();
    }
}
/**
 * HttpResponse decorator that captures the body as it is consumed and emits the
 * deferred response log line (body included) once the content is available.
 */
private static final class LoggingHttpResponse extends HttpResponse {
    private final HttpResponse actualResponse;
    private final LoggingEventBuilder logBuilder;
    private final int contentLength; // used to presize the capture buffer
    private final ClientLogger logger;
    private final boolean prettyPrintBody;
    private final String contentTypeHeader;

    private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
        ClientLogger logger, int contentLength, String contentTypeHeader,
        boolean prettyPrintBody) {
        super(actualResponse.getRequest());
        this.actualResponse = actualResponse;
        this.logBuilder = logBuilder;
        this.logger = logger;
        this.contentLength = contentLength;
        this.contentTypeHeader = contentTypeHeader;
        this.prettyPrintBody = prettyPrintBody;
    }

    @Override
    public int getStatusCode() {
        return actualResponse.getStatusCode();
    }

    @Override
    @Deprecated
    public String getHeaderValue(String name) {
        return actualResponse.getHeaderValue(name);
    }

    @Override
    public String getHeaderValue(HttpHeaderName headerName) {
        return actualResponse.getHeaderValue(headerName);
    }

    @Override
    public HttpHeaders getHeaders() {
        return actualResponse.getHeaders();
    }

    @Override
    public Flux<ByteBuffer> getBody() {
        // Tee each buffer into a side stream; the accumulated body is logged when
        // the Flux terminates (Flux.using invokes the cleanup callback).
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        return Flux.using(() -> stream, s -> actualResponse.getBody()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() so the downstream consumer's read position is untouched.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> doLog(s.toString(StandardCharsets.UTF_8)));
    }

    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
    }

    @Override
    public Mono<String> getBodyAsString() {
        // NOTE(review): String::new decodes with the platform default charset while the
        // capture in getBody() uses UTF-8 — confirm this divergence is intended.
        return getBodyAsByteArray().map(String::new);
    }

    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
    }

    @Override
    public BinaryData getBodyAsBinaryData() {
        BinaryData content = actualResponse.getBodyAsBinaryData();
        doLog(content.toString());
        return content;
    }

    @Override
    public void close() {
        actualResponse.close();
    }

    // Emits the deferred response log with the captured body (pretty printed when configured).
    private void doLog(String body) {
        logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
            prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
            .log(RESPONSE_LOG_MESSAGE);
    }
}
} |
updated default back to enabled | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.enableRedactedHeaderLogging = true;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.enableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingEnabled();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | this.enableRedactedHeaderLogging = true; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.disableRedactedHeaderLogging = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.disableRedactedHeaderLogging = httpLogOptions.isRedactedHeaderLoggingDisabled();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final boolean enableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Fast path: logging disabled entirely.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    // One cached logger per calling method so log lines carry the caller's name.
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    // Log the request, forward the call, then log the response with the elapsed time.
    return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
        .then(next.process())
        .flatMap(response -> responseLogger.logResponse(logger,
            getResponseLoggingOptions(response, startNs, context)))
        .doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
/**
 * Synchronous counterpart of {@code process}: logs the request, invokes the rest of the
 * pipeline, then logs the response or the thrown failure.
 */
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging disabled entirely.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        // The response logger may return a wrapping response (deferred body logging), so the
        // returned instance replaces the original.
        if (response != null) {
            response = responseLogger.logResponseSync(
                logger, getResponseLoggingOptions(response, startNs, context));
        }
        return response;
    } catch (RuntimeException e) {
        // Log the failure with minimal correlation data and rethrow unchanged.
        createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", e);
        throw e;
    }
}
/*
 * Builds a minimal logging event carrying request-correlation headers (client request id and
 * traceparent) when those headers are in the allow-list and present on the request.
 */
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
    LoggingEventBuilder log = logger.atLevel(level);
    // NOTE(review): the guard checks the static LOGGER, not the per-caller 'logger' used to build
    // the event — confirm this is intentional rather than a typo.
    if (LOGGER.canLogAtLevel(level) && request != null) {
        if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
            String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
            if (clientRequestId != null) {
                log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
            }
        }
        if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
            String traceparent = request.getHeaders().getValue(TRACEPARENT);
            if (traceparent != null) {
                log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
            }
        }
    }
    return log;
}
/** Packages the request, its context, and the retry count into a request logging context. */
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    final Context context = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), context, getRequestRetryCount(context));
}
/** Packages the response, elapsed time, context, and retry count into a response logging context. */
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    final Context context = callContext.getContext();
    final Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, context, getRequestRetryCount(context));
}
/** Default request logger: emits one structured log line per outgoing request. */
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
    // The logging work itself is synchronous, so the async variant delegates and returns a
    // completed Mono.
    @Override
    public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
        return Mono.empty();
    }
    @Override
    public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
    }
    // Builds and emits the request log line honoring the configured HttpLogDetailLevel.
    private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final HttpRequest request = loggingOptions.getHttpRequest();
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
            Integer retryCount = loggingOptions.getTryCount();
            if (retryCount != null) {
                logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
            }
        }
        // Headers are only logged when the logger is at INFORMATIONAL or better, regardless of
        // the event's own level.
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, enableRedactedHeaderLogging);
        }
        // No body: emit immediately with content-length 0.
        if (request.getBody() == null) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
                .log(REQUEST_LOG_MESSAGE);
            return;
        }
        String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, request.getHeaders());
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
        if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
            // logBody emits the log line itself once the body content is available.
            logBody(request, (int) contentLength, logBuilder, logger, contentType);
            return;
        }
        logBuilder.log(REQUEST_LOG_MESSAGE);
    }
}
/*
 * Logs the request body, choosing a strategy based on how the body content is held in memory.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
    BinaryData data = request.getBodyAsBinaryData();
    BinaryDataContent content = BinaryDataHelper.getContent(data);
    if (content instanceof StringContent
        || content instanceof ByteBufferContent
        || content instanceof SerializableContent
        || content instanceof ByteArrayContent) {
        // Fully in-memory, repeatable content: log its string form directly.
        logBody(logBuilder, logger, contentType, content.toString());
    } else if (content instanceof InputStreamContent) {
        // An InputStream can only be consumed once, so buffer it fully, replace the request body
        // with the buffered bytes, and log from the buffer.
        byte[] contentBytes = content.toBytes();
        request.setBody(contentBytes);
        logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
    } else {
        // Streaming content: tee each emitted buffer into a side stream and emit the log line in
        // the Flux.using cleanup once the body has been fully consumed downstream.
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
            .doOnNext(byteBuffer -> {
                try {
                    // Duplicate so the downstream consumer's read position is unaffected.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
    }
}
/** Attaches the (possibly pretty-printed) body text to the log event and emits it. */
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    final String bodyText = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, bodyText).log(REQUEST_LOG_MESSAGE);
}
/** Default response logger: emits one structured log line per received response. */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer logging until the body is consumed: the returned wrapper captures the
                // body as it streams and emits the log line itself.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }
    // Headers are only logged when the logger is at INFORMATIONAL or better.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, enableRedactedHeaderLogging);
        }
    }
    // Adds status code, redacted URL, and round-trip duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }
    // Logs the raw Content-Length header value when present and non-empty.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }
    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Same deferred-body wrapper as the async path.
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
/*
 * Generates the URL to log, replacing the value of every query parameter that is not in the
 * allow-list with the REDACTED placeholder.
 *
 * @param url URL where the request is being sent.
 * @param allowedQueryParameterNames Lower-cased query parameter names whose values may be logged.
 * @return The URL string with non-allow-listed query parameter values redacted.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String query = url.getQuery();
    if (CoreUtils.isNullOrEmpty(query)) {
        return url.toString();
    }
    UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
        String name = queryParam.getKey();
        String loggedValue = allowedQueryParameterNames.contains(name.toLowerCase(Locale.ROOT))
            ? queryParam.getValue()
            : REDACTED_PLACEHOLDER;
        urlBuilder.addQueryParameter(name, loggedValue);
    });
    return urlBuilder.toString();
}
/*
 * Adds HTTP headers to the log event being built.
 *
 * Allow-listed headers are logged with their values; every other header is logged with a
 * REDACTED placeholder value, but only when redacted-header logging is enabled.
 *
 * @param allowedHeaderNames Names of headers that may be logged with their values.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder Log event being populated.
 * @param enableRedactedHeaderLogging Whether non-allow-listed headers appear (redacted) at all.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder, boolean enableRedactedHeaderLogging) {
    HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((key, value) -> {
        // 'key' is the raw-map key; assumed to match the allow-list's casing — TODO confirm.
        boolean isAllowed = allowedHeaderNames.contains(key);
        if (isAllowed || enableRedactedHeaderLogging) {
            logBuilder.addKeyValue(value.getName(), isAllowed ? value.getValue() : REDACTED_PLACEHOLDER);
        }
    });
}
/*
 * Pretty prints the body when pretty printing is enabled and the Content-Type indicates JSON.
 *
 * @param logger Logger used to warn if the body fails to pretty print as JSON.
 * @param prettyPrintBody Whether pretty printing is enabled.
 * @param contentType Content-Type header value, possibly null.
 * @param body Body of the request or response.
 * @return The body pretty printed if it is JSON, otherwise the unmodified body.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    boolean isJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !isJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Best-effort: fall back to the raw body if it is not valid JSON.
        logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
        return body;
    }
}
/*
 * Parses the Content-Length header into a long, returning 0 when the header is absent, empty,
 * or not a valid number.
 *
 * @param logger Logger used to note an unparsable Content-Length value.
 * @param headers HTTP headers that are checked for Content-Length.
 * @return The parsed Content-Length, or 0 when unavailable.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        logger.log(LogLevel.INFORMATIONAL,
            () -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e);
        return 0;
    }
}
/*
 * Determines if the request or response body should be logged.
 *
 * The body is logged only when the Content-Type is not "application/octet-stream" and the body
 * is non-empty and smaller than the 16KB cap.
 *
 * @param contentTypeHeader Content-Type header value.
 * @param contentLength Content-Length header represented as a numeric.
 * @return Whether the body should be logged.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    if (ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)) {
        return false;
    }
    return contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
}
/*
 * Gets the request retry count to include in logging.
 *
 * Returns null — meaning the retry count is not logged — when no value is set in the context or
 * the value is not a valid integer.
 */
private static Integer getRequestRetryCount(Context context) {
    Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawRetryCount == null) {
        return null;
    }
    String rawText = rawRetryCount.toString();
    try {
        return Integer.valueOf(rawText);
    } catch (NumberFormatException ex) {
        LOGGER.atInfo()
            .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
            .log("Could not parse the request retry count.");
        return null;
    }
}
/*
 * Get or create the ClientLogger for the method having its request and response logged.
 */
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    // Crude eviction: once the cache grows past the cap the whole map is dropped rather than
    // evicting individual entries; loggers are then lazily recreated on demand.
    if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
        CALLER_METHOD_LOGGER_CACHE.clear();
    }
    return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
/** Maps a LogLevel onto the corresponding builder of the given logger; defaults to verbose. */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    } else if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    } else if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    return logger.atVerbose();
}
/**
 * HttpResponse decorator that captures the body as the caller consumes it and emits the
 * deferred response log line (including the body) once the body is available.
 */
private static final class LoggingHttpResponse extends HttpResponse {
    private final HttpResponse actualResponse;
    private final LoggingEventBuilder logBuilder;
    private final int contentLength;
    private final ClientLogger logger;
    private final boolean prettyPrintBody;
    private final String contentTypeHeader;
    private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
        ClientLogger logger, int contentLength, String contentTypeHeader,
        boolean prettyPrintBody) {
        super(actualResponse.getRequest());
        this.actualResponse = actualResponse;
        this.logBuilder = logBuilder;
        this.logger = logger;
        this.contentLength = contentLength;
        this.contentTypeHeader = contentTypeHeader;
        this.prettyPrintBody = prettyPrintBody;
    }
    @Override
    public int getStatusCode() {
        return actualResponse.getStatusCode();
    }
    @Override
    @Deprecated
    public String getHeaderValue(String name) {
        return actualResponse.getHeaderValue(name);
    }
    @Override
    public String getHeaderValue(HttpHeaderName headerName) {
        return actualResponse.getHeaderValue(headerName);
    }
    @Override
    public HttpHeaders getHeaders() {
        return actualResponse.getHeaders();
    }
    @Override
    public Flux<ByteBuffer> getBody() {
        // Tee each buffer into a side stream; the Flux.using cleanup logs the accumulated body
        // once the downstream consumer finishes with it.
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        return Flux.using(() -> stream, s -> actualResponse.getBody()
            .doOnNext(byteBuffer -> {
                try {
                    // Duplicate so the consumer's read position is unaffected.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> doLog(s.toString(StandardCharsets.UTF_8)));
    }
    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
    }
    @Override
    public Mono<String> getBodyAsString() {
        // NOTE(review): String::new decodes with the platform default charset, unlike getBody()'s
        // logging which uses UTF-8 — confirm this asymmetry is intentional.
        return getBodyAsByteArray().map(String::new);
    }
    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
    }
    @Override
    public BinaryData getBodyAsBinaryData() {
        // BinaryData is repeatable, so the body is logged eagerly here.
        BinaryData content = actualResponse.getBodyAsBinaryData();
        doLog(content.toString());
        return content;
    }
    @Override
    public void close() {
        actualResponse.close();
    }
    // Emits the deferred response log line with the captured body attached.
    private void doLog(String body) {
        logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
            prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
            .log(RESPONSE_LOG_MESSAGE);
    }
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final boolean disableRedactedHeaderLogging;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
/**
 * Logs the outgoing HTTP request, forwards the call down the pipeline, and logs the response
 * (or failure) once it completes.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Fast path: logging disabled entirely, skip all bookkeeping.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    // One cached ClientLogger per caller method name carried in the "caller-method" context data.
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    return requestLogger.logRequest(logger, getRequestLoggingOptions(context))
        .then(next.process())
        .flatMap(response -> responseLogger.logResponse(logger,
            getResponseLoggingOptions(response, startNs, context)))
        // Pipeline failures are logged with minimal correlation data, then propagated unchanged.
        .doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable));
}
/**
 * Synchronous counterpart of {@code process}: logs the request, invokes the rest of the
 * pipeline, then logs the response or the thrown failure.
 */
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging disabled entirely.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        // The response logger may return a wrapping response (deferred body logging), so the
        // returned instance replaces the original.
        if (response != null) {
            response = responseLogger.logResponseSync(
                logger, getResponseLoggingOptions(response, startNs, context));
        }
        return response;
    } catch (RuntimeException e) {
        // Log the failure with minimal correlation data and rethrow unchanged.
        createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", e);
        throw e;
    }
}
/*
 * Builds a minimal logging event carrying request-correlation headers (client request id and
 * traceparent) when those headers are in the allow-list and present on the request.
 */
private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) {
    LoggingEventBuilder log = logger.atLevel(level);
    // NOTE(review): the guard checks the static LOGGER, not the per-caller 'logger' used to build
    // the event — confirm this is intentional rather than a typo.
    if (LOGGER.canLogAtLevel(level) && request != null) {
        if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) {
            String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID);
            if (clientRequestId != null) {
                log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId);
            }
        }
        if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) {
            String traceparent = request.getHeaders().getValue(TRACEPARENT);
            if (traceparent != null) {
                log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent);
            }
        }
    }
    return log;
}
/** Packages the request, its context, and the retry count into a request logging context. */
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    final Context context = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), context, getRequestRetryCount(context));
}
/** Packages the response, elapsed time, context, and retry count into a response logging context. */
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    final Context context = callContext.getContext();
    final Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, context, getRequestRetryCount(context));
}
/** Default request logger: emits one structured log line per outgoing request. */
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
    // The logging work itself is synchronous, so the async variant delegates and returns a
    // completed Mono.
    @Override
    public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
        return Mono.empty();
    }
    @Override
    public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
    }
    // Builds and emits the request log line honoring the configured HttpLogDetailLevel.
    private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final HttpRequest request = loggingOptions.getHttpRequest();
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
            Integer retryCount = loggingOptions.getTryCount();
            if (retryCount != null) {
                logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
            }
        }
        // Headers are only logged when the logger is at INFORMATIONAL or better, regardless of
        // the event's own level.
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder, disableRedactedHeaderLogging);
        }
        // No body: emit immediately with content-length 0.
        if (request.getBody() == null) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
                .log(REQUEST_LOG_MESSAGE);
            return;
        }
        String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, request.getHeaders());
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
        if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
            // logBody emits the log line itself once the body content is available.
            logBody(request, (int) contentLength, logBuilder, logger, contentType);
            return;
        }
        logBuilder.log(REQUEST_LOG_MESSAGE);
    }
}
/*
 * Logs the request body, choosing a strategy based on how the body content is held in memory.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
    BinaryData data = request.getBodyAsBinaryData();
    BinaryDataContent content = BinaryDataHelper.getContent(data);
    if (content instanceof StringContent
        || content instanceof ByteBufferContent
        || content instanceof SerializableContent
        || content instanceof ByteArrayContent) {
        // Fully in-memory, repeatable content: log its string form directly.
        logBody(logBuilder, logger, contentType, content.toString());
    } else if (content instanceof InputStreamContent) {
        // An InputStream can only be consumed once, so buffer it fully, replace the request body
        // with the buffered bytes, and log from the buffer.
        byte[] contentBytes = content.toBytes();
        request.setBody(contentBytes);
        logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
    } else {
        // Streaming content: tee each emitted buffer into a side stream and emit the log line in
        // the Flux.using cleanup once the body has been fully consumed downstream.
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
            .doOnNext(byteBuffer -> {
                try {
                    // Duplicate so the downstream consumer's read position is unaffected.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
    }
}
/** Attaches the (possibly pretty-printed) body text to the log event and emits it. */
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    final String bodyText = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, bodyText).log(REQUEST_LOG_MESSAGE);
}
/** Default response logger: emits one structured log line per received response. */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer logging until the body is consumed: the returned wrapper captures the
                // body as it streams and emits the log line itself.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }
    // Headers are only logged when the logger is at INFORMATIONAL or better.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder, disableRedactedHeaderLogging);
        }
    }
    // Adds status code, redacted URL, and round-trip duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }
    // Logs the raw Content-Length header value when present and non-empty.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }
    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Same deferred-body wrapper as the async path.
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
/*
 * Generates the URL to log, replacing the value of every query parameter that is not in the
 * allow-list with the REDACTED placeholder.
 *
 * @param url URL where the request is being sent.
 * @param allowedQueryParameterNames Lower-cased query parameter names whose values may be logged.
 * @return The URL string with non-allow-listed query parameter values redacted.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String query = url.getQuery();
    if (CoreUtils.isNullOrEmpty(query)) {
        return url.toString();
    }
    UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
        String name = queryParam.getKey();
        String loggedValue = allowedQueryParameterNames.contains(name.toLowerCase(Locale.ROOT))
            ? queryParam.getValue()
            : REDACTED_PLACEHOLDER;
        urlBuilder.addQueryParameter(name, loggedValue);
    });
    return urlBuilder.toString();
}
/*
 * Adds HTTP headers to the log event being built.
 *
 * Allow-listed headers are logged with their values. The names (never the values) of all other
 * headers are collected into a single comma-separated "redactedHeaders" key, unless redacted
 * header logging has been disabled.
 *
 * @param allowedHeaderNames Names of headers that may be logged with their values.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder Log event being populated.
 * @param disableRedactedHeaderLogging Whether redacted header names are omitted entirely.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder, boolean disableRedactedHeaderLogging) {
    final StringBuilder redactedHeaders = new StringBuilder();
    HttpHeadersAccessHelper.getRawHeaderMap(headers).forEach((key, value) -> {
        // 'key' is the raw-map key; assumed to match the allow-list's casing — TODO confirm.
        if (allowedHeaderNames.contains(key)) {
            logBuilder.addKeyValue(value.getName(), value.getValue());
        } else if (!disableRedactedHeaderLogging) {
            if (redactedHeaders.length() > 0) {
                redactedHeaders.append(',');
            }
            redactedHeaders.append(value.getName());
        }
    });
    if (redactedHeaders.length() > 0) {
        logBuilder.addKeyValue("redactedHeaders", redactedHeaders.toString());
    }
}
/*
 * Pretty prints the body when pretty printing is enabled and the Content-Type indicates JSON.
 *
 * @param logger Logger used to warn if the body fails to pretty print as JSON.
 * @param prettyPrintBody Whether pretty printing is enabled.
 * @param contentType Content-Type header value, possibly null.
 * @param body Body of the request or response.
 * @return The body pretty printed if it is JSON, otherwise the unmodified body.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    boolean isJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !isJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Best-effort: fall back to the raw body if it is not valid JSON.
        logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e);
        return body;
    }
}
/*
 * Parses the Content-Length header into a long, returning 0 when the header is absent, empty,
 * or not a valid number.
 *
 * @param logger Logger used to note an unparsable Content-Length value.
 * @param headers HTTP headers that are checked for Content-Length.
 * @return The parsed Content-Length, or 0 when unavailable.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        logger.log(LogLevel.INFORMATIONAL,
            () -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e);
        return 0;
    }
}
/*
 * Determines if the request or response body should be logged.
 *
 * The body is logged only when the Content-Type is not "application/octet-stream" and the body
 * is non-empty and smaller than the 16KB cap.
 *
 * @param contentTypeHeader Content-Type header value.
 * @param contentLength Content-Length header represented as a numeric.
 * @return Whether the body should be logged.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    if (ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)) {
        return false;
    }
    return contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
}
/*
 * Gets the request retry count to include in logging.
 *
 * Returns null — meaning the retry count is not logged — when no value is set in the context or
 * the value is not a valid integer.
 */
private static Integer getRequestRetryCount(Context context) {
    Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawRetryCount == null) {
        return null;
    }
    String rawText = rawRetryCount.toString();
    try {
        return Integer.valueOf(rawText);
    } catch (NumberFormatException ex) {
        LOGGER.atInfo()
            .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount)
            .log("Could not parse the request retry count.");
        return null;
    }
}
/*
 * Get or create the ClientLogger for the method having its request and response logged.
 */
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    // Crude eviction: once the cache grows past the cap the whole map is dropped rather than
    // evicting individual entries; loggers are then lazily recreated on demand.
    if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
        CALLER_METHOD_LOGGER_CACHE.clear();
    }
    return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
/** Maps a LogLevel onto the corresponding builder of the given logger; defaults to verbose. */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    } else if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    } else if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    return logger.atVerbose();
}
    /*
     * HttpResponse decorator that buffers the response body as it streams through and, once the
     * body has been fully consumed, logs it via the supplied LoggingEventBuilder.
     */
    private static final class LoggingHttpResponse extends HttpResponse {
        private final HttpResponse actualResponse;
        private final LoggingEventBuilder logBuilder;
        // Used to pre-size the in-memory buffer that captures the body.
        private final int contentLength;
        private final ClientLogger logger;
        private final boolean prettyPrintBody;
        private final String contentTypeHeader;

        private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
            ClientLogger logger, int contentLength, String contentTypeHeader,
            boolean prettyPrintBody) {
            super(actualResponse.getRequest());
            this.actualResponse = actualResponse;
            this.logBuilder = logBuilder;
            this.logger = logger;
            this.contentLength = contentLength;
            this.contentTypeHeader = contentTypeHeader;
            this.prettyPrintBody = prettyPrintBody;
        }

        // Status code, headers, and close are straight pass-throughs to the wrapped response.
        @Override
        public int getStatusCode() {
            return actualResponse.getStatusCode();
        }

        @Override
        @Deprecated
        public String getHeaderValue(String name) {
            return actualResponse.getHeaderValue(name);
        }

        @Override
        public String getHeaderValue(HttpHeaderName headerName) {
            return actualResponse.getHeaderValue(headerName);
        }

        @Override
        public HttpHeaders getHeaders() {
            return actualResponse.getHeaders();
        }

        @Override
        public Flux<ByteBuffer> getBody() {
            // Copies each emitted buffer into an in-memory stream (duplicated so downstream
            // consumers still see the original buffer positions); when the Flux terminates,
            // Flux.using's cleanup callback logs the accumulated body decoded as UTF-8.
            AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
            return Flux.using(() -> stream, s -> actualResponse.getBody()
                .doOnNext(byteBuffer -> {
                    try {
                        ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                    } catch (IOException ex) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                    }
                }), s -> doLog(s.toString(StandardCharsets.UTF_8)));
        }

        @Override
        public Mono<byte[]> getBodyAsByteArray() {
            // Routed through getBody() so the logging side effect still happens.
            return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
        }

        @Override
        public Mono<String> getBodyAsString() {
            return getBodyAsByteArray().map(String::new);
        }

        @Override
        public Mono<String> getBodyAsString(Charset charset) {
            return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
        }

        @Override
        public BinaryData getBodyAsBinaryData() {
            // Logs the body immediately using the BinaryData's string form, then returns it.
            BinaryData content = actualResponse.getBodyAsBinaryData();
            doLog(content.toString());
            return content;
        }

        @Override
        public void close() {
            actualResponse.close();
        }

        // Emits the response log entry with the (optionally pretty-printed) body attached.
        private void doLog(String body) {
            logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
                prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
                .log(RESPONSE_LOG_MESSAGE);
        }
    }
} |
The keyword `aoai` is an unknown word to the spell checker, and it is only used by the SDK team; we should try to avoid using it. | public void simpleRetrievalOperation() throws InterruptedException {
String apiKey = Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY");
String deploymentOrModelId = "gpt-4-1106-preview";
String fileName = "retrieval_sample_java_sdk.txt";
client = new AssistantsClientBuilder()
.credential(new KeyCredential(apiKey))
.buildClient();
Path filePath = Paths.get("src", "samples", "resources", fileName);
BinaryData fileData = BinaryData.fromFile(filePath);
FileDetails fileDetails = new FileDetails(fileData).setFilename(fileName);
OpenAIFile openAIFile = client.uploadFile(fileDetails, FilePurpose.ASSISTANTS);
Assistant assistant = client.createAssistant(
new AssistantCreationOptions(deploymentOrModelId)
.setName("Java SDK Retrieval Sample")
.setInstructions("You are a helpful assistant that can help fetch data from files you know about.")
.setTools(Arrays.asList(new RetrievalToolDefinition()))
.setFileIds(Arrays.asList(openAIFile.getId()))
);
AssistantThread thread = client.createThread(new AssistantThreadCreationOptions());
client.createMessage(
thread.getId(),
MessageRole.USER,
"Can you give me the documented codes for 'banana' and 'orange'?");
ThreadRun run = client.createRun(thread, assistant);
do {
Thread.sleep(500);
run = client.getRun(thread.getId(), run.getId());
} while (run.getStatus() == RunStatus.IN_PROGRESS
|| run.getStatus() == RunStatus.QUEUED);
OpenAIPageableListOfThreadMessage messages = client.listMessages(thread.getId());
for (ThreadMessage message : messages.getData()) {
message.getContent().forEach(content -> {
if (content instanceof MessageTextContent) {
MessageTextDetails messageTextDetails = ((MessageTextContent) content).getText();
System.out.println(messageTextDetails.getValue());
messageTextDetails.getAnnotations().forEach(annotation ->
System.out.println("\tAnnotation start: " + annotation.getStartIndex()
+ " ,end: " + annotation.getEndIndex() + " ,text: \"" + annotation.getText() + "\""));
} else if (content instanceof MessageImageFileContent) {
System.out.print("Image file ID: ");
System.out.println(((MessageImageFileContent) content).getImageFile().getFileId());
}
});
}
} | String fileName = "retrieval_sample_java_sdk.txt"; | public void simpleRetrievalOperation() throws InterruptedException {
String apiKey = Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY");
String deploymentOrModelId = "gpt-4-1106-preview";
String fileName = "retrieval_sample_java_sdk.txt";
client = new AssistantsClientBuilder()
.credential(new KeyCredential(apiKey))
.buildClient();
Path filePath = Paths.get("src", "samples", "resources", fileName);
BinaryData fileData = BinaryData.fromFile(filePath);
FileDetails fileDetails = new FileDetails(fileData).setFilename(fileName);
OpenAIFile openAIFile = client.uploadFile(fileDetails, FilePurpose.ASSISTANTS);
Assistant assistant = client.createAssistant(
new AssistantCreationOptions(deploymentOrModelId)
.setName("Java SDK Retrieval Sample")
.setInstructions("You are a helpful assistant that can help fetch data from files you know about.")
.setTools(Arrays.asList(new RetrievalToolDefinition()))
.setFileIds(Arrays.asList(openAIFile.getId()))
);
AssistantThread thread = client.createThread(new AssistantThreadCreationOptions());
client.createMessage(
thread.getId(),
MessageRole.USER,
"Can you give me the documented codes for 'banana' and 'orange'?");
ThreadRun run = client.createRun(thread, assistant);
do {
Thread.sleep(500);
run = client.getRun(thread.getId(), run.getId());
} while (run.getStatus() == RunStatus.IN_PROGRESS
|| run.getStatus() == RunStatus.QUEUED);
OpenAIPageableListOfThreadMessage messages = client.listMessages(thread.getId());
for (ThreadMessage message : messages.getData()) {
message.getContent().forEach(content -> {
if (content instanceof MessageTextContent) {
MessageTextDetails messageTextDetails = ((MessageTextContent) content).getText();
System.out.println(messageTextDetails.getValue());
messageTextDetails.getAnnotations().forEach(annotation ->
System.out.println("\tAnnotation start: " + annotation.getStartIndex()
+ " ,end: " + annotation.getEndIndex() + " ,text: \"" + annotation.getText() + "\""));
} else if (content instanceof MessageImageFileContent) {
System.out.print("Image file ID: ");
System.out.println(((MessageImageFileContent) content).getImageFile().getFileId());
}
});
}
} | class ReadmeSamples {
private AssistantsClient client = new AssistantsClientBuilder().buildClient();
    /**
     * Sample: builds a synchronous {@link AssistantsClient} authenticated with an
     * {@link AzureKeyCredential} against an Azure endpoint.
     */
    public void createSyncClientKeyCredential() {
        // "{key}" and "{endpoint}" are placeholders the reader substitutes with real values.
        AssistantsClient client = new AssistantsClientBuilder()
            .credential(new AzureKeyCredential("{key}"))
            .endpoint("{endpoint}")
            .buildClient();
    }
    /**
     * Sample: builds an asynchronous {@link AssistantsAsyncClient} authenticated with an
     * {@link AzureKeyCredential} against an Azure endpoint.
     */
    public void createAsyncClientKeyCredential() {
        // "{key}" and "{endpoint}" are placeholders the reader substitutes with real values.
        AssistantsAsyncClient client = new AssistantsClientBuilder()
            .credential(new AzureKeyCredential("{key}"))
            .endpoint("{endpoint}")
            .buildAsyncClient();
    }
    /**
     * Sample: builds a synchronous client for the non-Azure OpenAI service. No endpoint is
     * configured; only the OpenAI secret key is supplied.
     */
    public void createNonAzureSyncClientWithApiKey() {
        AssistantsClient client = new AssistantsClientBuilder()
            .credential(new KeyCredential("{openai-secret-key}"))
            .buildClient();
    }
    /**
     * Sample: builds an asynchronous client for the non-Azure OpenAI service. No endpoint is
     * configured; only the OpenAI secret key is supplied.
     */
    public void createNonAzureAsyncClientWithApiKey() {
        AssistantsAsyncClient client = new AssistantsClientBuilder()
            .credential(new KeyCredential("{openai-secret-key}"))
            .buildAsyncClient();
    }
    /**
     * Sample: creates an assistant with a name and instructions for the given deployment/model ID.
     */
    public void createAssistant() {
        AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions("{deploymentOrModelId}")
            .setName("Math Tutor")
            .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.");
        Assistant assistant = client.createAssistant(assistantCreationOptions);
    }
    @Test
    public void simpleMathAssistantOperations() throws InterruptedException {
        // Requires the NON_AZURE_OPENAI_KEY environment variable to be set.
        String apiKey = Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY");
        String deploymentOrModelId = "gpt-4-1106-preview";
        client = new AssistantsClientBuilder()
            .credential(new KeyCredential(apiKey))
            .buildClient();
        // Create a "Math Tutor" assistant for the chosen model.
        AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(deploymentOrModelId)
            .setName("Math Tutor")
            .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.");
        Assistant assistant = client.createAssistant(assistantCreationOptions);
        System.out.printf("Assistant ID = \"%s\" is created at %s.%n", assistant.getId(), assistant.getCreatedAt());
        String assistantId = assistant.getId();
        // Create a thread, post the user's question, and start a run against the assistant.
        AssistantThread thread = client.createThread(new AssistantThreadCreationOptions());
        String threadId = thread.getId();
        String userMessage = "I need to solve the equation `3x + 11 = 14`. Can you help me?";
        ThreadMessage threadMessage = client.createMessage(threadId, MessageRole.USER, userMessage);
        ThreadRun run = client.createRun(threadId, new CreateRunOptions(assistantId));
        // Alternative: create the thread, seed it with the message, and start the run in one call.
        CreateAndRunThreadOptions createAndRunThreadOptions = new CreateAndRunThreadOptions(assistantId)
            .setThread(new AssistantThreadCreationOptions()
                .setMessages(Arrays.asList(new ThreadInitializationMessage(MessageRole.USER,
                    "I need to solve the equation `3x + 11 = 14`. Can you help me?"))));
        run = client.createThreadAndRun(createAndRunThreadOptions);
        // Poll once per second until the run leaves the QUEUED/IN_PROGRESS states.
        do {
            run = client.getRun(run.getThreadId(), run.getId());
            Thread.sleep(1000);
        } while (run.getStatus() == RunStatus.QUEUED || run.getStatus() == RunStatus.IN_PROGRESS);
        // Print every message in the thread, with its index and role.
        OpenAIPageableListOfThreadMessage messages = client.listMessages(run.getThreadId());
        List<ThreadMessage> data = messages.getData();
        for (int i = 0; i < data.size(); i++) {
            ThreadMessage dataMessage = data.get(i);
            MessageRole role = dataMessage.getRole();
            for (MessageContent messageContent : dataMessage.getContent()) {
                // NOTE(review): assumes every content item is text — TODO confirm no image content here.
                MessageTextContent messageTextContent = (MessageTextContent) messageContent;
                System.out.println(i + ": Role = " + role + ", content = " + messageTextContent.getText().getValue());
            }
        }
    }
@Test
@Test
public void simpleFunctionCallOperation() throws InterruptedException {
String apiKey = Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY");
String deploymentOrModelId = "gpt-4-1106-preview";
client = new AssistantsClientBuilder()
.credential(new KeyCredential(apiKey))
.buildClient();
AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(deploymentOrModelId)
.setName("Java Assistants SDK Function Tool Sample Assistant")
.setInstructions("You are a weather bot. Use the provided functions to help answer questions. "
+ "Customize your responses to the user's preferences as much as possible and use friendly "
+ "nicknames for cities whenever possible.")
.setTools(Arrays.asList(
getUserFavoriteCityToolDefinition()
));
Assistant assistant = client.createAssistant(assistantCreationOptions);
AssistantThread thread = client.createThread(new AssistantThreadCreationOptions());
client.createMessage(thread.getId(), MessageRole.USER, "What is the weather like in my favorite city?");
ThreadRun run = client.createRun(thread, assistant);
do {
Thread.sleep(500);
run = client.getRun(thread.getId(), run.getId());
if (run.getStatus() == RunStatus.REQUIRES_ACTION
&& run.getRequiredAction() instanceof SubmitToolOutputsAction) {
SubmitToolOutputsAction requiredAction = (SubmitToolOutputsAction) run.getRequiredAction();
List<ToolOutput> toolOutputs = new ArrayList<>();
for (RequiredToolCall toolCall : requiredAction.getSubmitToolOutputs().getToolCalls()) {
toolOutputs.add(getResolvedToolOutput(toolCall));
}
run = client.submitToolOutputsToRun(thread.getId(), run.getId(), toolOutputs);
}
} while (run.getStatus() == RunStatus.QUEUED || run.getStatus() == RunStatus.IN_PROGRESS);
OpenAIPageableListOfThreadMessage messagesPage = client.listMessages(thread.getId());
List<ThreadMessage> messages = messagesPage.getData();
for (ThreadMessage message : messages) {
for (MessageContent contentItem : message.getContent()) {
if (contentItem instanceof MessageTextContent) {
System.out.println(((MessageTextContent) contentItem).getText().getValue());
} else if (contentItem instanceof MessageImageFileContent) {
System.out.println(((MessageImageFileContent) contentItem).getImageFile().getFileId());
}
}
}
}
    // Function-tool names; getResolvedToolOutput dispatches on these and the FunctionDefinition
    // created in getUserFavoriteCityToolDefinition uses GET_USER_FAVORITE_CITY as its name.
    public static final String GET_USER_FAVORITE_CITY = "getUserFavoriteCity";
    public static final String GET_CITY_NICKNAME = "getCityNickname";
    public static final String GET_WEATHER_AT_LOCATION = "getWeatherAtLocation";
    /**
     * Builds the function tool definition advertised to the assistant for
     * {@code getUserFavoriteCity}. The function takes no parameters, so the JSON schema is an
     * "object" with an empty properties map.
     */
    private FunctionToolDefinition getUserFavoriteCityToolDefinition() {
        // Local class serialized (via the Jackson annotations) into the JSON parameter schema.
        class UserFavoriteCityParameters {
            @JsonProperty("type")
            private String type = "object";
            @JsonProperty("properties")
            private Map<String, Object> properties = new HashMap<>();
        }
        return new FunctionToolDefinition(
            new FunctionDefinition(
                GET_USER_FAVORITE_CITY,
                BinaryData.fromObject(new UserFavoriteCityParameters()
                )
            ).setDescription("Gets the user's favorite city."));
    }
    // Stand-in for a real lookup of the user's favorite city; always Seattle.
    private static String getUserFavoriteCity() {
        return "Seattle, WA";
    }
    // Stand-in nickname lookup; ignores the location and always returns Seattle's nickname.
    private static String getCityNickname(String location) {
        return "The Emerald City";
    }
private static String getWeatherAtLocation(String location, String temperatureUnit) {
return temperatureUnit.equals("f") ? "70f" : "21c";
}
    /**
     * Resolves a tool call requested by the assistant by dispatching on the function name and
     * invoking the matching local stub.
     *
     * @param toolCall the tool call the run is waiting on.
     * @return the tool output to submit back to the run, or {@code null} when the call is not a
     * function call or the function name is unrecognized.
     */
    private ToolOutput getResolvedToolOutput(RequiredToolCall toolCall) {
        if (toolCall instanceof RequiredFunctionToolCall) {
            RequiredFunctionToolCall functionToolCall = (RequiredFunctionToolCall) toolCall;
            if (functionToolCall.getFunction().getName().equals(GET_USER_FAVORITE_CITY)) {
                return new ToolOutput().setToolCallId(toolCall.getId())
                    .setOutput(getUserFavoriteCity());
            }
            if (functionToolCall.getFunction().getName().equals(GET_CITY_NICKNAME)) {
                // Function arguments arrive as a JSON string; deserialize into a simple map.
                Map<String, String> parameters = BinaryData.fromString(
                    functionToolCall.getFunction().getArguments())
                    .toObject(new TypeReference<Map<String, String>>() {});
                String location = parameters.get("location");
                return new ToolOutput().setToolCallId(toolCall.getId())
                    .setOutput(getCityNickname(location));
            }
            if (functionToolCall.getFunction().getName().equals(GET_WEATHER_AT_LOCATION)) {
                Map<String, String> parameters = BinaryData.fromString(
                    functionToolCall.getFunction().getArguments())
                    .toObject(new TypeReference<Map<String, String>>() {});
                String location = parameters.get("location");
                // Celsius is the default when the assistant did not specify a unit.
                String unit = parameters.getOrDefault("unit", "c");
                return new ToolOutput().setToolCallId(toolCall.getId())
                    .setOutput(getWeatherAtLocation(location, unit));
            }
        }
        return null;
    }
} | class ReadmeSamples {
private AssistantsClient client = new AssistantsClientBuilder().buildClient();
public void createSyncClientKeyCredential() {
AssistantsClient client = new AssistantsClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildClient();
}
public void createAsyncClientKeyCredential() {
AssistantsAsyncClient client = new AssistantsClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildAsyncClient();
}
public void createNonAzureSyncClientWithApiKey() {
AssistantsClient client = new AssistantsClientBuilder()
.credential(new KeyCredential("{openai-secret-key}"))
.buildClient();
}
public void createNonAzureAsyncClientWithApiKey() {
AssistantsAsyncClient client = new AssistantsClientBuilder()
.credential(new KeyCredential("{openai-secret-key}"))
.buildAsyncClient();
}
public void createAssistant() {
AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions("{deploymentOrModelId}")
.setName("Math Tutor")
.setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.");
Assistant assistant = client.createAssistant(assistantCreationOptions);
}
@Test
public void simpleMathAssistantOperations() throws InterruptedException {
String apiKey = Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY");
String deploymentOrModelId = "gpt-4-1106-preview";
client = new AssistantsClientBuilder()
.credential(new KeyCredential(apiKey))
.buildClient();
AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(deploymentOrModelId)
.setName("Math Tutor")
.setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.");
Assistant assistant = client.createAssistant(assistantCreationOptions);
System.out.printf("Assistant ID = \"%s\" is created at %s.%n", assistant.getId(), assistant.getCreatedAt());
String assistantId = assistant.getId();
AssistantThread thread = client.createThread(new AssistantThreadCreationOptions());
String threadId = thread.getId();
String userMessage = "I need to solve the equation `3x + 11 = 14`. Can you help me?";
ThreadMessage threadMessage = client.createMessage(threadId, MessageRole.USER, userMessage);
ThreadRun run = client.createRun(threadId, new CreateRunOptions(assistantId));
CreateAndRunThreadOptions createAndRunThreadOptions = new CreateAndRunThreadOptions(assistantId)
.setThread(new AssistantThreadCreationOptions()
.setMessages(Arrays.asList(new ThreadInitializationMessage(MessageRole.USER,
"I need to solve the equation `3x + 11 = 14`. Can you help me?"))));
run = client.createThreadAndRun(createAndRunThreadOptions);
do {
run = client.getRun(run.getThreadId(), run.getId());
Thread.sleep(1000);
} while (run.getStatus() == RunStatus.QUEUED || run.getStatus() == RunStatus.IN_PROGRESS);
OpenAIPageableListOfThreadMessage messages = client.listMessages(run.getThreadId());
List<ThreadMessage> data = messages.getData();
for (int i = 0; i < data.size(); i++) {
ThreadMessage dataMessage = data.get(i);
MessageRole role = dataMessage.getRole();
for (MessageContent messageContent : dataMessage.getContent()) {
MessageTextContent messageTextContent = (MessageTextContent) messageContent;
System.out.println(i + ": Role = " + role + ", content = " + messageTextContent.getText().getValue());
}
}
}
@Test
@Test
public void simpleFunctionCallOperation() throws InterruptedException {
String apiKey = Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY");
String deploymentOrModelId = "gpt-4-1106-preview";
client = new AssistantsClientBuilder()
.credential(new KeyCredential(apiKey))
.buildClient();
AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(deploymentOrModelId)
.setName("Java Assistants SDK Function Tool Sample Assistant")
.setInstructions("You are a weather bot. Use the provided functions to help answer questions. "
+ "Customize your responses to the user's preferences as much as possible and use friendly "
+ "nicknames for cities whenever possible.")
.setTools(Arrays.asList(
getUserFavoriteCityToolDefinition()
));
Assistant assistant = client.createAssistant(assistantCreationOptions);
AssistantThread thread = client.createThread(new AssistantThreadCreationOptions());
client.createMessage(thread.getId(), MessageRole.USER, "What is the weather like in my favorite city?");
ThreadRun run = client.createRun(thread, assistant);
do {
Thread.sleep(500);
run = client.getRun(thread.getId(), run.getId());
if (run.getStatus() == RunStatus.REQUIRES_ACTION
&& run.getRequiredAction() instanceof SubmitToolOutputsAction) {
SubmitToolOutputsAction requiredAction = (SubmitToolOutputsAction) run.getRequiredAction();
List<ToolOutput> toolOutputs = new ArrayList<>();
for (RequiredToolCall toolCall : requiredAction.getSubmitToolOutputs().getToolCalls()) {
toolOutputs.add(getResolvedToolOutput(toolCall));
}
run = client.submitToolOutputsToRun(thread.getId(), run.getId(), toolOutputs);
}
} while (run.getStatus() == RunStatus.QUEUED || run.getStatus() == RunStatus.IN_PROGRESS);
OpenAIPageableListOfThreadMessage messagesPage = client.listMessages(thread.getId());
List<ThreadMessage> messages = messagesPage.getData();
for (ThreadMessage message : messages) {
for (MessageContent contentItem : message.getContent()) {
if (contentItem instanceof MessageTextContent) {
System.out.println(((MessageTextContent) contentItem).getText().getValue());
} else if (contentItem instanceof MessageImageFileContent) {
System.out.println(((MessageImageFileContent) contentItem).getImageFile().getFileId());
}
}
}
}
public static final String GET_USER_FAVORITE_CITY = "getUserFavoriteCity";
public static final String GET_CITY_NICKNAME = "getCityNickname";
public static final String GET_WEATHER_AT_LOCATION = "getWeatherAtLocation";
private FunctionToolDefinition getUserFavoriteCityToolDefinition() {
class UserFavoriteCityParameters {
@JsonProperty("type")
private String type = "object";
@JsonProperty("properties")
private Map<String, Object> properties = new HashMap<>();
}
return new FunctionToolDefinition(
new FunctionDefinition(
GET_USER_FAVORITE_CITY,
BinaryData.fromObject(new UserFavoriteCityParameters()
)
).setDescription("Gets the user's favorite city."));
}
private static String getUserFavoriteCity() {
return "Seattle, WA";
}
private static String getCityNickname(String location) {
return "The Emerald City";
}
private static String getWeatherAtLocation(String location, String temperatureUnit) {
return temperatureUnit.equals("f") ? "70f" : "21c";
}
private ToolOutput getResolvedToolOutput(RequiredToolCall toolCall) {
if (toolCall instanceof RequiredFunctionToolCall) {
RequiredFunctionToolCall functionToolCall = (RequiredFunctionToolCall) toolCall;
if (functionToolCall.getFunction().getName().equals(GET_USER_FAVORITE_CITY)) {
return new ToolOutput().setToolCallId(toolCall.getId())
.setOutput(getUserFavoriteCity());
}
if (functionToolCall.getFunction().getName().equals(GET_CITY_NICKNAME)) {
Map<String, String> parameters = BinaryData.fromString(
functionToolCall.getFunction().getArguments())
.toObject(new TypeReference<Map<String, String>>() {});
String location = parameters.get("location");
return new ToolOutput().setToolCallId(toolCall.getId())
.setOutput(getCityNickname(location));
}
if (functionToolCall.getFunction().getName().equals(GET_WEATHER_AT_LOCATION)) {
Map<String, String> parameters = BinaryData.fromString(
functionToolCall.getFunction().getArguments())
.toObject(new TypeReference<Map<String, String>>() {});
String location = parameters.get("location");
String unit = parameters.getOrDefault("unit", "c");
return new ToolOutput().setToolCallId(toolCall.getId())
.setOutput(getWeatherAtLocation(location, unit));
}
}
return null;
}
} |
Aside from the logging change, was it a bug before that, if a null token was returned here, we didn't continue to the next possible token retrieval call? | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | } | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | class ClientAssertionCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientAssertionCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates an instance of ClientAssertionCredential.
*
* @param clientId the client ID of user assigned or system assigned identity.
* @param tenantId the tenant ID of the application
* @param clientAssertion the supplier of the client assertion
* @param identityClientOptions the options to configure the identity client
*/
    ClientAssertionCredential(String clientId, String tenantId, Supplier<String> clientAssertion,
        IdentityClientOptions identityClientOptions) {
        // A single builder configures both the async and sync identity clients so they share the
        // same tenant, client ID, assertion supplier, and options.
        IdentityClientBuilder builder = new IdentityClientBuilder()
            .tenantId(tenantId)
            .clientId(clientId)
            .clientAssertionSupplier(clientAssertion)
            .identityClientOptions(identityClientOptions);
        identityClient = builder.build();
        identitySyncClient = builder.buildSyncClient();
    }
    @Override
    public Mono<AccessToken> getToken(TokenRequestContext request) {
        // Try the confidential client's token cache first; any cache error or empty result falls
        // back to a fresh token acquisition. Success and failure are both logged.
        return identityClient.authenticateWithConfidentialClientCache(request)
            .onErrorResume(t -> Mono.empty())
            .switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
            .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
            .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request,
                error));
    }
@Override
} | class ClientAssertionCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientAssertionCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates an instance of ClientAssertionCredential.
*
* @param clientId the client ID of user assigned or system assigned identity.
* @param tenantId the tenant ID of the application
* @param clientAssertion the supplier of the client assertion
* @param identityClientOptions the options to configure the identity client
*/
ClientAssertionCredential(String clientId, String tenantId, Supplier<String> clientAssertion,
IdentityClientOptions identityClientOptions) {
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientAssertionSupplier(clientAssertion)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request,
error));
}
@Override
} |
INFO or VERBOSE? I'm thinking VERBOSE as this is really nitty-gritty information that is super low level. | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | LOGGER.info("Token not found in the MSAL cache."); | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | class IdentitySyncClient extends IdentityClientBase {
    // Lazily-built MSAL public client; the "WithCae" variant has Continuous Access Evaluation enabled.
    private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
    private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
    // Lazily-built MSAL confidential clients for the standard, CAE, managed identity and
    // workload identity flows respectively.
    private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
    private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
    private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
    private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
    // Caches the client assertion read from disk; refreshed on the configured timeout (default 5 minutes).
    private final SynchronousAccessor<String> clientAssertionAccessor;
    /**
     * Creates an IdentityClient with the given options.
     *
     * @param tenantId the tenant ID of the application.
     * @param clientId the client ID of the application.
     * @param clientSecret the client secret of the application.
     * @param resourceId the resource ID of the application
     * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
     * @param clientAssertionFilePath the path to the file containing the client assertion.
     * @param clientAssertionSupplier the supplier that produces a client assertion token.
     * @param certificate the PKCS12 or PEM certificate of the application.
     * @param certificatePassword the password protecting the PFX certificate.
     * @param isSharedTokenCacheCredential Indicate whether the credential is
     * {@link com.azure.identity.SharedTokenCacheCredential} or not.
     * @param clientAssertionTimeout the timeout to use for the client assertion.
     * @param options the options configuring the client.
     */
    IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
        String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
        byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
        Duration clientAssertionTimeout, IdentityClientOptions options) {
        super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
            certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
        // All MSAL application objects are built lazily so failures surface on first use,
        // not at construction time.
        this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getPublicClient(isSharedTokenCacheCredential, false));
        this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
            this.getPublicClient(isSharedTokenCacheCredential, true));
        this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getConfidentialClient(false));
        this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
            this.getConfidentialClient(true));
        this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getManagedIdentityConfidentialClient());
        this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getWorkloadIdentityConfidentialClient());
        // Default the assertion cache window to 5 minutes when no timeout is configured.
        this.clientAssertionAccessor = clientAssertionTimeout == null
            ? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
            : new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
    }
private String parseClientAssertion() {
if (clientAssertionFilePath != null) {
try {
byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(encoded, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} else {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
}
    /**
     * Synchronously acquire a token from Active Directory with a client secret or client assertion.
     *
     * @param request the details of the token request
     * @return the AccessToken
     */
    public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
        ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        // When a client assertion supplier is configured it is used as the client credential.
        if (clientAssertionSupplier != null) {
            builder.clientCredential(ClientCredentialFactory
                .createFromClientAssertion(clientAssertionSupplier.get()));
        }
        try {
            return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
        } catch (InterruptedException | ExecutionException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? confidentialClientApplicationAccessorWithCae : confidentialClientApplicationAccessor;
}
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? publicClientApplicationAccessorWithCae : publicClientApplicationAccessor;
}
    /**
     * Synchronously acquire a token for a managed identity via its confidential client.
     *
     * @param request the details of the token request
     * @return the AccessToken
     * @throws CredentialUnavailableException if token acquisition fails for any reason.
     */
    public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
        ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        try {
            return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
        } catch (Exception e) {
            // Every failure is mapped to CredentialUnavailableException here; confirm callers rely on this.
            throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
        }
    }
/**
* Acquire a token from the confidential client.
*
* @param request the details of the token request
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
try {
IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
AccessToken accessToken = new MsalToken(authenticationResult);
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
} else {
throw new IllegalStateException("Received token is close to expiry.");
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
    /**
     * Synchronously acquire a token from Active Directory with a username and a password.
     *
     * @param request the details of the token request
     * @param username the username of the user
     * @param password the password of the user
     * @return the AccessToken
     */
    public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
        String username, String password) {
        PublicClientApplication pc = getPublicClientInstance(request).getValue();
        UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
            buildUsernamePasswordFlowParameters(request, username, password);
        try {
            return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
        } catch (Exception e) {
            // NOTE(review): the troubleshooting URL below appears truncated (likely an
            // extraction artifact) — restore the full link.
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
                + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
                + "here at https:
                null, e));
        }
    }
    /**
     * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
     *
     * @param request the details of the token request
     * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
     * @return the AccessToken received when the device challenge is met
     */
    public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
        Consumer<DeviceCodeInfo> deviceCodeConsumer) {
        PublicClientApplication pc = getPublicClientInstance(request).getValue();
        DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
        try {
            // Blocks until the user completes (or fails) the device-code challenge.
            return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
        }
    }
/**
* Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @param redirectUrl the redirect URL to listen on and receive security code
* @param loginHint the username suggestion to pre-fill the login page's username/email address field
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
URI redirectUri;
String redirect;
if (port != null) {
redirect = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
redirect = redirectUrl;
} else {
redirect = HTTP_LOCALHOST;
}
try {
redirectUri = new URI(redirect);
} catch (URISyntaxException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
PublicClientApplication pc = getPublicClientInstance(request).getValue();
try {
return new MsalToken(pc.acquireToken(builder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
"Failed to acquire token with Interactive Browser Authentication.", null, e));
}
}
    /**
     * Synchronously acquire a token from Active Directory with Azure CLI.
     *
     * @param request the details of the token request
     * @return the AccessToken
     */
    public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
        StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
        // The CLI accepts a single resource, so the requested scopes must map to one.
        String scopes = ScopeUtil.scopesToResource(request.getScopes());
        try {
            ScopeUtil.validateScope(scopes);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
        azCommand.append(scopes);
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        // Character-range validation before concatenating the tenant into the command line
        // (presumably to prevent argument injection — confirm).
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azCommand.append(" --tenant ").append(tenant);
        }
        try {
            return getTokenFromAzureCLIAuthentication(azCommand);
        } catch (RuntimeException e) {
            throw (e instanceof CredentialUnavailableException
                ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
                : LOGGER.logExceptionAsError(e));
        }
    }
/**
* Asynchronously acquire a token from Active Directory with Azure Developer CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
List<String> scopes = request.getScopes();
if (scopes.size() == 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
}
scopes.forEach(scope -> {
try {
ScopeUtil.validateScope(scope);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
});
azdCommand.append(String.join(" --scope ", scopes));
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azdCommand.append(" --tenant-id ").append(tenant);
}
try {
return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
    /**
     * Synchronously acquire a token from Active Directory using the On-Behalf-Of flow.
     *
     * @param request the details of the token request
     * @return the AccessToken
     */
    public AccessToken authenticateWithOBO(TokenRequestContext request) {
        ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
        try {
            return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
        }
    }
    /**
     * Synchronously exchange the cached client assertion for an access token.
     *
     * @param request the details of the token request
     * @return the AccessToken
     * @throws UncheckedIOException if reading the client assertion fails.
     */
    public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
        try {
            String assertionToken = clientAssertionAccessor.getValue();
            return authenticateWithExchangeTokenHelper(request, assertionToken);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
    // Adapts the sync token-exchange flow into MSAL's app token provider callback shape.
    Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
        return appTokenProviderParameters -> {
            TokenRequestContext trc = new TokenRequestContext()
                .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
                .setClaims(appTokenProviderParameters.claims)
                .setTenantId(appTokenProviderParameters.tenantId);
            AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
            Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
                TokenProviderResult result = new TokenProviderResult();
                result.setAccessToken(accessToken.getToken());
                result.setTenantId(trc.getTenantId());
                result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
                return result;
            };
            // Use the configured executor when present; otherwise the CompletableFuture
            // default (ForkJoinPool.commonPool) is used.
            return options.getExecutorService() != null
                ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
                : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
        };
    }
    /**
     * Synchronously acquire a token using the workload identity confidential client.
     *
     * @param request the details of the token request
     * @return the AccessToken
     * @throws CredentialUnavailableException if token acquisition fails for any reason.
     */
    public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
        ConfidentialClientApplication confidentialClient =
            workloadIdentityConfidentialClientApplicationAccessor.getValue();
        try {
            ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil
                        .resolveTenantId(tenantId, request, options));
            return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
        } catch (Exception e) {
            // NOTE(review): message says "Managed Identity" although this is the workload
            // identity path — confirm whether a workload-identity-specific message is intended.
            throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
        }
    }
    /**
     * Get the configured identity client options.
     *
     * @return the configured {@link IdentityClientOptions}.
     */
    public IdentityClientOptions getIdentityClientOptions() {
        return options;
    }
    @Override
    Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
        // NOTE(review): intentionally returns null on the sync client — confirm callers
        // of this async hook tolerate a null Mono.
        return null;
    }
} | class IdentitySyncClient extends IdentityClientBase {
    // Lazily-built MSAL public client; the "WithCae" variant has Continuous Access Evaluation enabled.
    private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
    private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
    // Lazily-built MSAL confidential clients for the standard, CAE, managed identity and
    // workload identity flows respectively.
    private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
    private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
    private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
    private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
    // Caches the client assertion read from disk; refreshed on the configured timeout (default 5 minutes).
    private final SynchronousAccessor<String> clientAssertionAccessor;
    /**
     * Creates an IdentityClient with the given options.
     *
     * @param tenantId the tenant ID of the application.
     * @param clientId the client ID of the application.
     * @param clientSecret the client secret of the application.
     * @param resourceId the resource ID of the application
     * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
     * @param clientAssertionFilePath the path to the file containing the client assertion.
     * @param clientAssertionSupplier the supplier that produces a client assertion token.
     * @param certificate the PKCS12 or PEM certificate of the application.
     * @param certificatePassword the password protecting the PFX certificate.
     * @param isSharedTokenCacheCredential Indicate whether the credential is
     * {@link com.azure.identity.SharedTokenCacheCredential} or not.
     * @param clientAssertionTimeout the timeout to use for the client assertion.
     * @param options the options configuring the client.
     */
    IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
        String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
        byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
        Duration clientAssertionTimeout, IdentityClientOptions options) {
        super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
            certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
        // All MSAL application objects are built lazily so failures surface on first use,
        // not at construction time.
        this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getPublicClient(isSharedTokenCacheCredential, false));
        this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
            this.getPublicClient(isSharedTokenCacheCredential, true));
        this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getConfidentialClient(false));
        this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
            this.getConfidentialClient(true));
        this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getManagedIdentityConfidentialClient());
        this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
            this.getWorkloadIdentityConfidentialClient());
        // Default the assertion cache window to 5 minutes when no timeout is configured.
        this.clientAssertionAccessor = clientAssertionTimeout == null
            ? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
            : new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
    }
private String parseClientAssertion() {
if (clientAssertionFilePath != null) {
try {
byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(encoded, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} else {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
}
    /**
     * Synchronously acquire a token from Active Directory with a client secret or client assertion.
     *
     * @param request the details of the token request
     * @return the AccessToken
     */
    public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
        ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        // When a client assertion supplier is configured it is used as the client credential.
        if (clientAssertionSupplier != null) {
            builder.clientCredential(ClientCredentialFactory
                .createFromClientAssertion(clientAssertionSupplier.get()));
        }
        try {
            return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
        } catch (InterruptedException | ExecutionException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? confidentialClientApplicationAccessorWithCae : confidentialClientApplicationAccessor;
}
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? publicClientApplicationAccessorWithCae : publicClientApplicationAccessor;
}
    /**
     * Synchronously acquire a token for a managed identity via its confidential client.
     *
     * @param request the details of the token request
     * @return the AccessToken
     * @throws CredentialUnavailableException if token acquisition fails for any reason.
     */
    public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
        ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        try {
            return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
        } catch (Exception e) {
            // Every failure is mapped to CredentialUnavailableException here; confirm callers rely on this.
            throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
        }
    }
/**
* Acquire a token from the confidential client.
*
* @param request the details of the token request
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
try {
IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
AccessToken accessToken = new MsalToken(authenticationResult);
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
} else {
throw new IllegalStateException("Received token is close to expiry.");
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
    /**
     * Synchronously acquire a token from Active Directory with a username and a password.
     *
     * @param request the details of the token request
     * @param username the username of the user
     * @param password the password of the user
     * @return the AccessToken
     */
    public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
        String username, String password) {
        PublicClientApplication pc = getPublicClientInstance(request).getValue();
        UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
            buildUsernamePasswordFlowParameters(request, username, password);
        try {
            return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
        } catch (Exception e) {
            // NOTE(review): the troubleshooting URL below appears truncated (likely an
            // extraction artifact) — restore the full link.
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
                + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
                + "here at https:
                null, e));
        }
    }
    /**
     * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
     *
     * @param request the details of the token request
     * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
     * @return the AccessToken received when the device challenge is met
     */
    public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
        Consumer<DeviceCodeInfo> deviceCodeConsumer) {
        PublicClientApplication pc = getPublicClientInstance(request).getValue();
        DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
        try {
            // Blocks until the user completes (or fails) the device-code challenge.
            return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
        }
    }
/**
* Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @param redirectUrl the redirect URL to listen on and receive security code
* @param loginHint the username suggestion to pre-fill the login page's username/email address field
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
URI redirectUri;
String redirect;
if (port != null) {
redirect = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
redirect = redirectUrl;
} else {
redirect = HTTP_LOCALHOST;
}
try {
redirectUri = new URI(redirect);
} catch (URISyntaxException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
PublicClientApplication pc = getPublicClientInstance(request).getValue();
try {
return new MsalToken(pc.acquireToken(builder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
"Failed to acquire token with Interactive Browser Authentication.", null, e));
}
}
    /**
     * Synchronously acquire a token from Active Directory with Azure CLI.
     *
     * @param request the details of the token request
     * @return the AccessToken
     */
    public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
        StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
        // The CLI accepts a single resource, so the requested scopes must map to one.
        String scopes = ScopeUtil.scopesToResource(request.getScopes());
        try {
            ScopeUtil.validateScope(scopes);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
        azCommand.append(scopes);
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        // Character-range validation before concatenating the tenant into the command line
        // (presumably to prevent argument injection — confirm).
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azCommand.append(" --tenant ").append(tenant);
        }
        try {
            return getTokenFromAzureCLIAuthentication(azCommand);
        } catch (RuntimeException e) {
            throw (e instanceof CredentialUnavailableException
                ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
                : LOGGER.logExceptionAsError(e));
        }
    }
/**
* Asynchronously acquire a token from Active Directory with Azure Developer CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
List<String> scopes = request.getScopes();
if (scopes.size() == 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
}
scopes.forEach(scope -> {
try {
ScopeUtil.validateScope(scope);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
});
azdCommand.append(String.join(" --scope ", scopes));
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azdCommand.append(" --tenant-id ").append(tenant);
}
try {
return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
    /**
     * Synchronously acquire a token from Active Directory using the On-Behalf-Of flow.
     *
     * @param request the details of the token request
     * @return the AccessToken
     */
    public AccessToken authenticateWithOBO(TokenRequestContext request) {
        ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
        try {
            return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
        }
    }
    /**
     * Synchronously exchange the cached client assertion for an access token.
     *
     * @param request the details of the token request
     * @return the AccessToken
     * @throws UncheckedIOException if reading the client assertion fails.
     */
    public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
        try {
            String assertionToken = clientAssertionAccessor.getValue();
            return authenticateWithExchangeTokenHelper(request, assertionToken);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
    // Adapts the sync token-exchange flow into MSAL's app token provider callback shape.
    Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
        return appTokenProviderParameters -> {
            TokenRequestContext trc = new TokenRequestContext()
                .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
                .setClaims(appTokenProviderParameters.claims)
                .setTenantId(appTokenProviderParameters.tenantId);
            AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
            Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
                TokenProviderResult result = new TokenProviderResult();
                result.setAccessToken(accessToken.getToken());
                result.setTenantId(trc.getTenantId());
                result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
                return result;
            };
            // Use the configured executor when present; otherwise the CompletableFuture
            // default (ForkJoinPool.commonPool) is used.
            return options.getExecutorService() != null
                ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
                : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
        };
    }
    /**
     * Synchronously acquire a token using the workload identity confidential client.
     *
     * @param request the details of the token request
     * @return the AccessToken
     * @throws CredentialUnavailableException if token acquisition fails for any reason.
     */
    public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
        ConfidentialClientApplication confidentialClient =
            workloadIdentityConfidentialClientApplicationAccessor.getValue();
        try {
            ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil
                        .resolveTenantId(tenantId, request, options));
            return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
        } catch (Exception e) {
            // NOTE(review): message says "Managed Identity" although this is the workload
            // identity path — confirm whether a workload-identity-specific message is intended.
            throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
        }
    }
    /**
     * Get the configured identity client options.
     *
     * @return the configured {@link IdentityClientOptions}.
     */
    public IdentityClientOptions getIdentityClientOptions() {
        return options;
    }
// The sync client resolves tokens directly, so this reactive hook from the base class
// is unused on this path. NOTE(review): returning null (rather than Mono.empty()) from a
// Mono-returning method is unusual — confirm no caller ever subscribes to this result.
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    return null;
}
} |
Can MSAL return null here? I don't think it does, but if it did, that would be a bug. If we got a token response, it is expected to be non-null in the success scenario, so the null check seems redundant. | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | return token; | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | class ClientCertificateCredential implements TokenCredential {
// Shared logger for all instances of this credential.
private static final ClientLogger LOGGER = new ClientLogger(ClientCertificateCredential.class);
// Async and sync clients that perform the actual MSAL calls; both built from the same builder.
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
 * Creates a ClientCertificateCredential with default identity client options.
 * @param tenantId the tenant ID of the application
 * @param clientId the client ID of the application
 * @param certificatePath the PEM file or PFX file containing the certificate
 * @param certificate the PEM or PFX certificate
 * @param certificatePassword the password protecting the PFX file
 * @param identityClientOptions the options to configure the identity client
 * @throws NullPointerException if both {@code certificate} and {@code certificatePath} are null
 */
ClientCertificateCredential(String tenantId, String clientId, String certificatePath, byte[] certificate,
    String certificatePassword, IdentityClientOptions identityClientOptions) {
    // At least one form of the certificate (path or raw bytes) must be supplied.
    Objects.requireNonNull(certificatePath == null ? certificate : certificatePath,
        "'certificate' and 'certificatePath' cannot both be null.");
    IdentityClientBuilder builder = new IdentityClientBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .certificatePath(certificatePath)
        .certificate(certificate)
        .certificatePassword(certificatePassword)
        .identityClientOptions(identityClientOptions);
    identityClient = builder.build();
    identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
    // Try the silent/cached path first; any failure there is treated as a cache miss.
    Mono<AccessToken> cachedToken = identityClient.authenticateWithConfidentialClientCache(request)
        .onErrorResume(t -> Mono.empty());
    // Fall back to a fresh confidential-client acquisition, built lazily on subscription.
    Mono<AccessToken> freshToken =
        Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request));
    return cachedToken
        .switchIfEmpty(freshToken)
        .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
        .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
            request, error));
}
@Override
} | class ClientCertificateCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientCertificateCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates a ClientCertificateCredential with default identity client options.
* @param tenantId the tenant ID of the application
* @param clientId the client ID of the application
* @param certificatePath the PEM file or PFX file containing the certificate
* @param certificate the PEM or PFX certificate
* @param certificatePassword the password protecting the PFX file
* @param identityClientOptions the options to configure the identity client
*/
ClientCertificateCredential(String tenantId, String clientId, String certificatePath, byte[] certificate,
String certificatePassword, IdentityClientOptions identityClientOptions) {
Objects.requireNonNull(certificatePath == null ? certificate : certificatePath,
"'certificate' and 'certificatePath' cannot both be null.");
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.certificatePath(certificatePath)
.certificate(certificate)
.certificatePassword(certificatePassword)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
// Tries the cached confidential-client token first (errors treated as a cache miss),
// then falls back to a fresh acquisition; logs success/failure either way.
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
    return identityClient.authenticateWithConfidentialClientCache(request)
        .onErrorResume(t -> Mono.empty())
        .switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
        .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
        .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
            request, error));
}
@Override
} |
Yeah, Debug scenario probably sounds better for this. This will also align with the goal of making our logs less noisy. | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | LOGGER.info("Token not found in the MSAL cache."); | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | class IdentitySyncClient extends IdentityClientBase {
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param clientSecret the client secret of the application.
* @param resourceId the resource ID of the application
* @param certificatePath the path to the PKCS12 or PEM certificate of the application.
* @param certificate the PKCS12 or PEM certificate of the application.
* @param certificatePassword the password protecting the PFX certificate.
* @param isSharedTokenCacheCredential Indicate whether the credential is
* {@link com.azure.identity.SharedTokenCacheCredential} or not.
* @param clientAssertionTimeout the timeout to use for the client assertion.
* @param options the options configuring the client.
*/
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
/**
 * Reads the client assertion from the configured file path as UTF-8 text.
 *
 * @return the assertion file contents
 */
private String parseClientAssertion() {
    // Without a configured file path there is nothing to read — fail fast.
    if (clientAssertionFilePath == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "Client Assertion File Path is not provided."
                + " It should be provided to authenticate with client assertion."
        ));
    }
    try {
        byte[] fileBytes = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(fileBytes, StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Synchronously acquires a token from Active Directory with a client secret
 * (or a client assertion when an assertion supplier is configured).
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil
                .resolveTenantId(tenantId, request, options));
    // When an assertion supplier is configured, use the assertion as the client credential.
    if (clientAssertionSupplier != null) {
        builder.clientCredential(ClientCredentialFactory
            .createFromClientAssertion(clientAssertionSupplier.get()));
    }
    try {
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (InterruptedException | ExecutionException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
// Selects the confidential-client accessor that matches the request's CAE setting.
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
// Selects the public-client accessor that matches the request's CAE setting.
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Synchronously acquires a token via the managed identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 * @throws CredentialUnavailableException if the acquisition fails for any reason
 */
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil
                .resolveTenantId(tenantId, request, options));
    try {
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (Exception e) {
        // The broad catch would otherwise swallow an InterruptedException's flag;
        // restore it so callers can still observe the interruption.
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Acquire a token from the confidential client.
 *
 * @param request the details of the token request
 * @return An access token, or null if no token exists in the cache.
 */
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    // CAE + claims forces a refresh so the new claims are honored.
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        // Only return cached tokens that are comfortably outside the refresh window.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        if (e.getMessage().contains("Token not found in the cache")) {
            // A cache miss is an expected, recoverable event: log at verbose (not info)
            // to keep logs quiet, matching the equivalent public-client code path.
            LOGGER.verbose("Token not found in the MSAL cache.");
            return null;
        } else {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
        }
    }
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquires a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return the MsalToken once the device challenge is met; throws if the device code expires
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
    try {
        // Blocks until the user completes (or fails) the device-code challenge.
        return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 * (NOTE(review): the second sentence was truncated in this copy of the source; reconstructed — confirm upstream.)
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the acquired MsalToken
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port, then explicit redirect URL, then the bare localhost default.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
        buildInteractiveRequestParameters(request, loginHint, redirectUri);
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    try {
        // Blocks until the interactive browser flow completes.
        return new MsalToken(pc.acquireToken(builder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, e));
    }
}
/**
 * Synchronously acquires a token by shelling out to the Azure CLI ({@code az account get-access-token}).
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
    StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    // Validate the resource string before it is placed on a command line.
    try {
        ScopeUtil.validateScope(scopes);
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    azCommand.append(scopes);
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    // Only pass --tenant when a concrete, non-default tenant was resolved.
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azCommand.append(" --tenant ").append(tenant);
    }
    try {
        return getTokenFromAzureCLIAuthentication(azCommand);
    } catch (RuntimeException e) {
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquires a token by shelling out to the Azure Developer CLI ({@code azd auth token}).
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 * @throws IllegalArgumentException if the request carries no scopes
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // isEmpty() over size() == 0: same behavior, clearer intent.
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before it is placed on a command line.
    scopes.forEach(scope -> {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    });
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    // Only pass --tenant-id when a concrete, non-default tenant was resolved.
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquires a token on behalf of another user (the OBO flow).
 * (Original javadoc said "with Azure PowerShell" — a copy-paste error; the body
 * clearly performs an On-Behalf-Of confidential-client acquisition.)
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
    ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
    try {
        return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    }
}
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
try {
String assertionToken = clientAssertionAccessor.getValue();
return authenticateWithExchangeTokenHelper(request, assertionToken);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
return appTokenProviderParameters -> {
TokenRequestContext trc = new TokenRequestContext()
.setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
.setClaims(appTokenProviderParameters.claims)
.setTenantId(appTokenProviderParameters.tenantId);
AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
TokenProviderResult result = new TokenProviderResult();
result.setAccessToken(accessToken.getToken());
result.setTenantId(trc.getTenantId());
result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
return result;
};
return options.getExecutorService() != null
? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
: CompletableFuture.supplyAsync(tokenProviderResultSupplier);
};
}
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient =
workloadIdentityConfidentialClientApplicationAccessor.getValue();
try {
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
* Get the configured identity client options.
*
* @return the client options.
*/
public IdentityClientOptions getIdentityClientOptions() {
return options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
return null;
}
} | class IdentitySyncClient extends IdentityClientBase {
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param clientSecret the client secret of the application.
* @param resourceId the resource ID of the application
* @param certificatePath the path to the PKCS12 or PEM certificate of the application.
* @param certificate the PKCS12 or PEM certificate of the application.
* @param certificatePassword the password protecting the PFX certificate.
* @param isSharedTokenCacheCredential Indicate whether the credential is
* {@link com.azure.identity.SharedTokenCacheCredential} or not.
* @param clientAssertionTimeout the timeout to use for the client assertion.
* @param options the options configuring the client.
*/
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
private String parseClientAssertion() {
if (clientAssertionFilePath != null) {
try {
byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(encoded, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} else {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
if (clientAssertionSupplier != null) {
builder.clientCredential(ClientCredentialFactory
.createFromClientAssertion(clientAssertionSupplier.get()));
}
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException | ExecutionException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? confidentialClientApplicationAccessorWithCae : confidentialClientApplicationAccessor;
}
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? publicClientApplicationAccessorWithCae : publicClientApplicationAccessor;
}
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
 * Acquire a token from the confidential client.
 *
 * @param request the details of the token request
 * @return An access token, or null if no token exists in the cache.
 * @throws IllegalStateException if the cached token is within the refresh window
 * @throws ClientAuthenticationException if the silent acquisition fails for a reason other
 * than a cache miss
 */
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.isCaeEnabled() && request.getClaims() != null) {
        // Claims indicate a CAE challenge: bypass the cache and force a fresh token.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        // Only return cached tokens that are comfortably outside the refresh offset.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        // NOTE(review): getMessage() may be null for some causes; contains() would then NPE — confirm
        // MSAL always populates the message on these failures.
        if (e.getMessage().contains("Token not found in the cache")) {
            LOGGER.verbose("Token not found in the MSAL cache.");
            return null;
        } else {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
        }
    }
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return an AccessToken when the device challenge is met
 * @throws ClientAuthenticationException if the device code expires or acquisition fails
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
                                            Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
    try {
        // Blocks until the user completes (or fails) the device-code challenge.
        return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if interactive authentication fails
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                    String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port, then explicit redirect URL, then the default localhost address.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
        buildInteractiveRequestParameters(request, loginHint, redirectUri);
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    try {
        return new MsalToken(pc.acquireToken(builder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, e));
    }
}
/**
 * Synchronously acquire a token by shelling out to the Azure CLI ({@code az account get-access-token}).
 *
 * @param request the details of the token request
 * @return the acquired {@link AccessToken}
 * @throws IllegalArgumentException if the requested scopes cannot be mapped to a resource
 * @throws CredentialUnavailableException if the Azure CLI is unavailable or not logged in
 */
public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
    StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    // Validate before appending to the shell command to avoid passing through unsafe text.
    try {
        ScopeUtil.validateScope(scopes);
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    azCommand.append(scopes);
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azCommand.append(" --tenant ").append(tenant);
    }
    try {
        return getTokenFromAzureCLIAuthentication(azCommand);
    } catch (RuntimeException e) {
        // "Unavailable" failures are logged at a lower severity so credential chains stay quiet.
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token by shelling out to the Azure Developer CLI ({@code azd auth token}).
 *
 * @param request the details of the token request
 * @return the acquired {@link AccessToken}
 * @throws IllegalArgumentException if the request has no scopes or a scope is malformed
 * @throws CredentialUnavailableException if the Azure Developer CLI is unavailable or not logged in
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before appending to the shell command.
    scopes.forEach(scope -> {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    });
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token using the On-Behalf-Of flow with the confidential client.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if the On-Behalf-Of acquisition fails
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
    ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
    try {
        return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    }
}
/**
 * Synchronously exchanges the configured client assertion (read from the assertion file) for an
 * access token.
 *
 * @param request the details of the token request
 * @return the acquired access token
 * @throws UncheckedIOException if the token exchange fails with an I/O error
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        String assertionToken = clientAssertionAccessor.getValue();
        return authenticateWithExchangeTokenHelper(request, assertionToken);
    } catch (IOException e) {
        // Log before rethrowing, consistent with the error handling elsewhere in this class.
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
/**
 * Builds the MSAL app token provider used for workload identity: exchanges the client assertion
 * for a token and adapts the result to MSAL's {@code TokenProviderResult}.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return appTokenProviderParameters -> {
        TokenRequestContext trc = new TokenRequestContext()
            .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
            .setClaims(appTokenProviderParameters.claims)
            .setTenantId(appTokenProviderParameters.tenantId);
        // The exchange itself is synchronous; only the result packaging runs async below.
        AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
        Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
            TokenProviderResult result = new TokenProviderResult();
            result.setAccessToken(accessToken.getToken());
            result.setTenantId(trc.getTenantId());
            result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
            return result;
        };
        // Prefer the user-supplied executor when configured; otherwise the common pool is used.
        return options.getExecutorService() != null
            ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
            : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
    };
}
/**
 * Synchronously acquires a token via the workload identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired access token
 * @throws CredentialUnavailableException if token acquisition fails for any reason
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient =
        workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (Exception e) {
        // NOTE(review): message says "Managed Identity" though this is the workload-identity path —
        // presumably intentional since workload identity builds on the managed-identity flow; confirm.
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Returns the options this identity client was configured with.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    // The sync client does not use the reactive managed-identity path; presumably callers route
    // through authenticateWithManagedIdentityConfidentialClient instead — TODO confirm no caller
    // relies on a non-null Mono here.
    return null;
}
}
I can go either way. It seemed like something a customer might be curious about so I went with INFO. I think of VERBOSE as for SDK devs basically. | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.info("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | LOGGER.info("Token not found in the MSAL cache."); | public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e.getMessage().contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
} | class IdentitySyncClient extends IdentityClientBase {
// Lazily-initialized MSAL application instances; the *WithCae variants enable Continuous Access
// Evaluation and are chosen per request (see getPublicClientInstance / getConfidentialClientInstance).
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Cached client assertion read from clientAssertionFilePath; refreshed after a timeout.
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to a file containing the client assertion token.
 * @param resourceId the resource ID of the application
 * @param clientAssertionSupplier the supplier producing a client assertion token on demand.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
                   String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
                   byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
                   Duration clientAssertionTimeout, IdentityClientOptions options) {
    super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
        certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
    // All MSAL apps are built lazily; the CAE/non-CAE pairs exist because CAE changes the token cache key.
    this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getPublicClient(isSharedTokenCacheCredential, false));
    this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
        this.getPublicClient(isSharedTokenCacheCredential, true));
    this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getConfidentialClient(false));
    this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
        this.getConfidentialClient(true));
    this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getManagedIdentityConfidentialClient());
    this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getWorkloadIdentityConfidentialClient());
    // Default the assertion refresh interval to 5 minutes when no timeout is configured.
    this.clientAssertionAccessor = clientAssertionTimeout == null
        ? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
        : new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
/**
 * Reads the client assertion token from {@code clientAssertionFilePath} as UTF-8 text.
 *
 * @return the client assertion token
 * @throws RuntimeException if the assertion file cannot be read
 * @throws IllegalStateException if no client assertion file path was configured
 */
private String parseClientAssertion() {
    if (clientAssertionFilePath == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "Client Assertion File Path is not provided."
                + " It should be provided to authenticate with client assertion."
        ));
    }
    try {
        byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(encoded, StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Synchronously acquires a token from Microsoft Entra ID using the configured confidential
 * client credential (client secret, certificate, or client assertion).
 *
 * @param request the details of the token request
 * @return the acquired access token
 * @throws RuntimeException wrapping any {@link InterruptedException} or
 * {@link ExecutionException} raised while waiting on the MSAL future
 */
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil
                .resolveTenantId(tenantId, request, options));
    // A client assertion supplier, when configured, overrides the credential on the MSAL app.
    if (clientAssertionSupplier != null) {
        builder.clientCredential(ClientCredentialFactory
            .createFromClientAssertion(clientAssertionSupplier.get()));
    }
    try {
        // MSAL returns a CompletableFuture; block since this is the synchronous client.
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (InterruptedException | ExecutionException e) {
        if (e instanceof InterruptedException) {
            // Restore the interrupt status so callers can still observe the interruption.
            Thread.currentThread().interrupt();
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/** Selects the CAE-enabled or standard confidential client accessor for this request. */
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
/** Selects the CAE-enabled or standard public client accessor for this request. */
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Synchronously acquires a token via the managed identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired access token
 * @throws CredentialUnavailableException if token acquisition fails for any reason
 */
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil
                .resolveTenantId(tenantId, request, options));
    try {
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (Exception e) {
        // Any failure maps to "unavailable" so credential chains can fall through to the next credential.
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Acquire a token from the confidential client.
 *
 * @param request the details of the token request
 * @return An access token, or null if no token exists in the cache.
 * @throws IllegalStateException if the cached token is within the refresh window
 * @throws ClientAuthenticationException if the silent acquisition fails for a reason other
 * than a cache miss
 */
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.isCaeEnabled() && request.getClaims() != null) {
        // Claims indicate a CAE challenge: bypass the cache and force a fresh token.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        // Only return cached tokens that are comfortably outside the refresh offset.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        if (e.getMessage().contains("Token not found in the cache")) {
            // A cache miss is routine; log at verbose to match the sibling cache path and
            // keep application logs quiet.
            LOGGER.verbose("Token not found in the MSAL cache.");
            return null;
        } else {
            throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
        }
    }
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return an AccessToken when the device challenge is met
 * @throws ClientAuthenticationException if the device code expires or acquisition fails
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
                                            Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
    try {
        // Blocks until the user completes (or fails) the device-code challenge.
        return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if interactive authentication fails
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                    String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port, then explicit redirect URL, then the default localhost address.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
        buildInteractiveRequestParameters(request, loginHint, redirectUri);
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    try {
        return new MsalToken(pc.acquireToken(builder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, e));
    }
}
/**
 * Synchronously acquire a token by shelling out to the Azure CLI ({@code az account get-access-token}).
 *
 * @param request the details of the token request
 * @return the acquired {@link AccessToken}
 * @throws IllegalArgumentException if the requested scopes cannot be mapped to a resource
 * @throws CredentialUnavailableException if the Azure CLI is unavailable or not logged in
 */
public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
    StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    // Validate before appending to the shell command to avoid passing through unsafe text.
    try {
        ScopeUtil.validateScope(scopes);
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    azCommand.append(scopes);
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azCommand.append(" --tenant ").append(tenant);
    }
    try {
        return getTokenFromAzureCLIAuthentication(azCommand);
    } catch (RuntimeException e) {
        // "Unavailable" failures are logged at a lower severity so credential chains stay quiet.
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token by shelling out to the Azure Developer CLI ({@code azd auth token}).
 *
 * @param request the details of the token request
 * @return the acquired {@link AccessToken}
 * @throws IllegalArgumentException if the request has no scopes or a scope is malformed
 * @throws CredentialUnavailableException if the Azure Developer CLI is unavailable or not logged in
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before appending to the shell command.
    scopes.forEach(scope -> {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    });
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token using the On-Behalf-Of flow with the confidential client.
 *
 * @param request the details of the token request
 * @return the acquired {@link MsalToken}
 * @throws ClientAuthenticationException if the On-Behalf-Of acquisition fails
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
    ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
    try {
        return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    }
}
/**
 * Synchronously exchanges the configured client assertion (read from the assertion file) for an
 * access token.
 *
 * @param request the details of the token request
 * @return the acquired access token
 * @throws UncheckedIOException if the token exchange fails with an I/O error
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        String assertionToken = clientAssertionAccessor.getValue();
        return authenticateWithExchangeTokenHelper(request, assertionToken);
    } catch (IOException e) {
        // Log before rethrowing, consistent with the error handling elsewhere in this class.
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
/**
 * Builds the MSAL app token provider used for workload identity: exchanges the client assertion
 * for a token and adapts the result to MSAL's {@code TokenProviderResult}.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return appTokenProviderParameters -> {
        TokenRequestContext trc = new TokenRequestContext()
            .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
            .setClaims(appTokenProviderParameters.claims)
            .setTenantId(appTokenProviderParameters.tenantId);
        // The exchange itself is synchronous; only the result packaging runs async below.
        AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
        Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
            TokenProviderResult result = new TokenProviderResult();
            result.setAccessToken(accessToken.getToken());
            result.setTenantId(trc.getTenantId());
            result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
            return result;
        };
        // Prefer the user-supplied executor when configured; otherwise the common pool is used.
        return options.getExecutorService() != null
            ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
            : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
    };
}
/**
 * Synchronously acquires a token via the workload identity confidential client.
 *
 * @param request the details of the token request
 * @return the acquired access token
 * @throws CredentialUnavailableException if token acquisition fails for any reason
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient =
        workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (Exception e) {
        // NOTE(review): message says "Managed Identity" though this is the workload-identity path —
        // presumably intentional since workload identity builds on the managed-identity flow; confirm.
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Returns the options this identity client was configured with.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    // The sync client does not use the reactive managed-identity path; presumably callers route
    // through authenticateWithManagedIdentityConfidentialClient instead — TODO confirm no caller
    // relies on a non-null Mono here.
    return null;
}
} | class IdentitySyncClient extends IdentityClientBase {
// Lazily-initialized MSAL application instances; the *WithCae variants enable Continuous Access
// Evaluation and are chosen per request (see getPublicClientInstance / getConfidentialClientInstance).
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Cached client assertion read from clientAssertionFilePath; refreshed after a timeout.
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param clientSecret the client secret of the application.
* @param resourceId the resource ID of the application
* @param certificatePath the path to the PKCS12 or PEM certificate of the application.
* @param certificate the PKCS12 or PEM certificate of the application.
* @param certificatePassword the password protecting the PFX certificate.
* @param isSharedTokenCacheCredential Indicate whether the credential is
* {@link com.azure.identity.SharedTokenCacheCredential} or not.
* @param clientAssertionTimeout the timeout to use for the client assertion.
* @param options the options configuring the client.
*/
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
/**
 * Reads the client assertion token from the configured assertion file.
 *
 * @return the file contents decoded as UTF-8
 * @throws IllegalStateException if no assertion file path was configured
 * @throws RuntimeException if the file cannot be read
 */
private String parseClientAssertion() {
// Guard clause: an assertion file path is mandatory for this flow.
if (clientAssertionFilePath == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
try {
byte[] fileBytes = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(fileBytes, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
 * Synchronously acquires a token from Microsoft Entra ID with a client secret or client assertion.
 *
 * @param request the details of the token request
 * @return the access token
 */
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
// When a client assertion supplier is configured, it supplies the credential material.
if (clientAssertionSupplier != null) {
builder.clientCredential(ClientCredentialFactory
.createFromClientAssertion(clientAssertionSupplier.get()));
}
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException e) {
// Restore the interrupt status so callers can still observe the interruption.
Thread.currentThread().interrupt();
throw LOGGER.logExceptionAsError(new RuntimeException(e));
} catch (ExecutionException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
// CAE-enabled requests use a dedicated confidential-client application instance.
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
if (request.isCaeEnabled()) {
return confidentialClientApplicationAccessorWithCae;
}
return confidentialClientApplicationAccessor;
}
// CAE-enabled requests use a dedicated public-client application instance.
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
if (request.isCaeEnabled()) {
return publicClientApplicationAccessorWithCae;
}
return publicClientApplicationAccessor;
}
/**
 * Acquires a token using the managed-identity confidential client.
 *
 * @param request the details of the token request
 * @return the access token
 * @throws CredentialUnavailableException if token acquisition fails for any reason
 */
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException e) {
// Restore the interrupt status so callers can still observe the interruption.
Thread.currentThread().interrupt();
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
* Acquire a token from the confidential client.
*
* @param request the details of the token request
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
if (request.isCaeEnabled() && request.getClaims() != null) {
// A CAE claims challenge must bypass the cache and force a fresh token.
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
try {
IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
AccessToken accessToken = new MsalToken(authenticationResult);
// A token inside the refresh offset of its expiry is treated the same as a miss.
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
} else {
throw new IllegalStateException("Received token is close to expiry.");
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
if (e instanceof InterruptedException) {
// Restore the interrupt status so callers can still observe the interruption.
Thread.currentThread().interrupt();
}
// getMessage() can be null (e.g. an ExecutionException wrapping a message-less cause);
// guard before substring matching to avoid masking the real failure with an NPE.
String message = e.getMessage();
if (message != null && message.contains("Token not found in the cache")) {
LOGGER.verbose("Token not found in the MSAL cache.");
return null;
} else {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(message, null, e));
}
}
}
/**
* Acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return An access token, or null if no token exists in the cache.
*/
@SuppressWarnings("deprecation")
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
PublicClientApplication publicClient = getPublicClientInstance(request).getValue();
DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder flowBuilder =
buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
try {
// Blocks until the user completes the device-code challenge or the code expires.
IAuthenticationResult result = publicClient.acquireToken(flowBuilder.build()).get();
return new MsalToken(result);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
}
}
/**
* Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @param redirectUrl the redirect URL to listen on and receive security code
* @param loginHint the username suggestion to pre-fill the login page's username/email address field
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
// Reply-URL precedence: explicit port, then explicit redirect URL, then plain localhost.
String replyUrl;
if (port != null) {
replyUrl = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
replyUrl = redirectUrl;
} else {
replyUrl = HTTP_LOCALHOST;
}
URI redirectUri;
try {
redirectUri = new URI(replyUrl);
} catch (URISyntaxException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
InteractiveRequestParameters.InteractiveRequestParametersBuilder parameters =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
PublicClientApplication publicClient = getPublicClientInstance(request).getValue();
try {
// Blocks until the browser hand-off completes.
return new MsalToken(publicClient.acquireToken(parameters.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
"Failed to acquire token with Interactive Browser Authentication.", null, e));
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
// The CLI accepts a single resource, so the requested scopes must collapse to one.
String resource = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(resource);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
StringBuilder command = new StringBuilder("az account get-access-token --output json --resource ")
.append(resource);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
command.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(command);
} catch (RuntimeException e) {
// Unavailable-credential failures get the dedicated logging path; everything else is an error.
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure Developer CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
List<String> scopes = request.getScopes();
// At least one scope is required to build the azd command line.
if (scopes.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
}
// Validate every scope before shelling out.
scopes.forEach(scope -> {
try {
ScopeUtil.validateScope(scope);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
});
azdCommand.append(String.join(" --scope ", scopes));
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azdCommand.append(" --tenant-id ").append(tenant);
}
try {
return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
} catch (RuntimeException e) {
// Unavailable-credential failures get the dedicated logging path; everything else is an error.
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
/**
 * Synchronously acquires a token from Microsoft Entra ID using the On-Behalf-Of flow.
 *
 * @param request the details of the token request
 * @return the access token
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
try {
IAuthenticationResult result = confidentialClient.acquireToken(buildOBOFlowParameters(request)).get();
return new MsalToken(result);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
}
}
/**
 * Exchanges the cached client assertion for an access token.
 *
 * @param request the details of the token request
 * @return the access token
 * @throws UncheckedIOException if the exchange fails with an I/O error
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
try {
String assertion = clientAssertionAccessor.getValue();
return authenticateWithExchangeTokenHelper(request, assertion);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
// Supplies MSAL with a workload-identity token provider: each invocation exchanges the
// cached client assertion for an access token on the requested tenant/scopes.
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
return appTokenProviderParameters -> {
// Translate MSAL's provider parameters into the SDK's TokenRequestContext.
TokenRequestContext trc = new TokenRequestContext()
.setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
.setClaims(appTokenProviderParameters.claims)
.setTenantId(appTokenProviderParameters.tenantId);
// The exchange happens eagerly here; only the result packaging is deferred to the future.
AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
TokenProviderResult result = new TokenProviderResult();
result.setAccessToken(accessToken.getToken());
result.setTenantId(trc.getTenantId());
result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
return result;
};
// Honor a caller-configured executor when present; otherwise use the default async pool.
return options.getExecutorService() != null
? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
: CompletableFuture.supplyAsync(tokenProviderResultSupplier);
};
}
/**
 * Acquires a token using the workload-identity confidential client.
 *
 * @param request the details of the token request
 * @return the access token
 * @throws CredentialUnavailableException if token acquisition fails for any reason
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient =
workloadIdentityConfidentialClientApplicationAccessor.getValue();
try {
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException e) {
// Restore the interrupt status so callers can still observe the interruption.
Thread.currentThread().interrupt();
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
* Get the configured identity client options.
*
* @return the client options.
*/
public IdentityClientOptions getIdentityClientOptions() {
return options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
// Base-class hook deliberately returns null on the sync client.
// NOTE(review): presumably the async IdentityClient supplies the real implementation — confirm.
return null;
}
} |
No. It either returned a token or threw an exception. I'm adding the null state to represent the cache miss. | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | } | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | class ClientAssertionCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientAssertionCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates an instance of ClientAssertionCredential.
*
* @param clientId the client ID of user assigned or system assigned identity.
* @param tenantId the tenant ID of the application
* @param clientAssertion the supplier of the client assertion
* @param identityClientOptions the options to configure the identity client
*/
ClientAssertionCredential(String clientId, String tenantId, Supplier<String> clientAssertion,
IdentityClientOptions identityClientOptions) {
// Wire tenant, client, and the assertion supplier into one builder so both the async
// and sync clients are constructed from identical configuration.
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientAssertionSupplier(clientAssertion)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
// Try the MSAL cache first; any cache error or miss falls through to a full
// confidential-client acquisition. Success and failure are both logged.
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request,
error));
}
@Override
} | class ClientAssertionCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientAssertionCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates an instance of ClientAssertionCredential.
*
* @param clientId the client ID of user assigned or system assigned identity.
* @param tenantId the tenant ID of the application
* @param clientAssertion the supplier of the client assertion
* @param identityClientOptions the options to configure the identity client
*/
ClientAssertionCredential(String clientId, String tenantId, Supplier<String> clientAssertion,
IdentityClientOptions identityClientOptions) {
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.clientAssertionSupplier(clientAssertion)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request,
error));
}
@Override
} |
I added returning null in the case where we do not get a token from the cache. | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | return token; | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | class ClientCertificateCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientCertificateCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates a ClientCertificateCredential with default identity client options.
* @param tenantId the tenant ID of the application
* @param clientId the client ID of the application
* @param certificatePath the PEM file or PFX file containing the certificate
* @param certificate the PEM or PFX certificate
* @param certificatePassword the password protecting the PFX file
* @param identityClientOptions the options to configure the identity client
*/
ClientCertificateCredential(String tenantId, String clientId, String certificatePath, byte[] certificate,
String certificatePassword, IdentityClientOptions identityClientOptions) {
// Exactly one of the certificate sources must be present; both null is a programming error.
Objects.requireNonNull(certificatePath == null ? certificate : certificatePath,
"'certificate' and 'certificatePath' cannot both be null.");
// Wire the certificate material into one builder so both the async and sync clients
// are constructed from identical configuration.
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.certificatePath(certificatePath)
.certificate(certificate)
.certificatePassword(certificatePassword)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
// Try the MSAL cache first; any cache error or miss falls through to a full
// confidential-client acquisition. Success and failure are both logged.
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
}
@Override
} | class ClientCertificateCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientCertificateCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates a ClientCertificateCredential with default identity client options.
* @param tenantId the tenant ID of the application
* @param clientId the client ID of the application
* @param certificatePath the PEM file or PFX file containing the certificate
* @param certificate the PEM or PFX certificate
* @param certificatePassword the password protecting the PFX file
* @param identityClientOptions the options to configure the identity client
*/
ClientCertificateCredential(String tenantId, String clientId, String certificatePath, byte[] certificate,
String certificatePassword, IdentityClientOptions identityClientOptions) {
Objects.requireNonNull(certificatePath == null ? certificate : certificatePath,
"'certificate' and 'certificatePath' cannot both be null.");
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.certificatePath(certificatePath)
.certificate(certificate)
.certificatePassword(certificatePassword)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
}
@Override
} |
if there's no token to return, then an exception will get thrown I believe from Msal. So, in what scenario do we get null back for token ? | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | return token; | public AccessToken getTokenSync(TokenRequestContext request) {
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request);
if (token != null) {
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
}
} catch (Exception e) { }
try {
AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request);
LoggingUtil.logTokenSuccess(LOGGER, request);
return token;
} catch (Exception e) {
LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e);
throw e;
}
} | class ClientCertificateCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientCertificateCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates a ClientCertificateCredential with default identity client options.
* @param tenantId the tenant ID of the application
* @param clientId the client ID of the application
* @param certificatePath the PEM file or PFX file containing the certificate
* @param certificate the PEM or PFX certificate
* @param certificatePassword the password protecting the PFX file
* @param identityClientOptions the options to configure the identity client
*/
ClientCertificateCredential(String tenantId, String clientId, String certificatePath, byte[] certificate,
String certificatePassword, IdentityClientOptions identityClientOptions) {
Objects.requireNonNull(certificatePath == null ? certificate : certificatePath,
"'certificate' and 'certificatePath' cannot both be null.");
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.certificatePath(certificatePath)
.certificate(certificate)
.certificatePassword(certificatePassword)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
}
@Override
} | class ClientCertificateCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ClientCertificateCredential.class);
private final IdentityClient identityClient;
private final IdentitySyncClient identitySyncClient;
/**
* Creates a ClientCertificateCredential with default identity client options.
* @param tenantId the tenant ID of the application
* @param clientId the client ID of the application
* @param certificatePath the PEM file or PFX file containing the certificate
* @param certificate the PEM or PFX certificate
* @param certificatePassword the password protecting the PFX file
* @param identityClientOptions the options to configure the identity client
*/
ClientCertificateCredential(String tenantId, String clientId, String certificatePath, byte[] certificate,
String certificatePassword, IdentityClientOptions identityClientOptions) {
Objects.requireNonNull(certificatePath == null ? certificate : certificatePath,
"'certificate' and 'certificatePath' cannot both be null.");
IdentityClientBuilder builder = new IdentityClientBuilder()
.tenantId(tenantId)
.clientId(clientId)
.certificatePath(certificatePath)
.certificate(certificate)
.certificatePassword(certificatePassword)
.identityClientOptions(identityClientOptions);
identityClient = builder.build();
identitySyncClient = builder.buildSyncClient();
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return identityClient.authenticateWithConfidentialClientCache(request)
.onErrorResume(t -> Mono.empty())
.switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request)))
.doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request))
.doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(),
request, error));
}
@Override
} |
```suggestion return prefix + " - " + RMResources.InternalServerError; ``` | public static String getInternalServerErrorMessage(String prefix) {
return prefix + "-" + RMResources.InternalServerError;
} | return prefix + "-" + RMResources.InternalServerError; | public static String getInternalServerErrorMessage(String prefix) {
return prefix + " - " + RMResources.InternalServerError;
} | class Exceptions {
// True when the exception's HTTP status code equals the given status.
public static boolean isStatusCode(CosmosException e, int status) {
return status == e.getStatusCode();
}
// True when the exception's sub-status code equals the given sub-status.
public static boolean isSubStatusCode(CosmosException e, int subStatus) {
return subStatus == e.getSubStatusCode();
}
// True for HTTP 410 (Gone) responses.
public static boolean isGone(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE);
}
// True for HTTP 409 (Conflict) responses.
public static boolean isConflict(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.CONFLICT);
}
// True for HTTP 404 (Not Found) responses.
public static boolean isNotFound(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.NOTFOUND);
}
// Gone with the partition-key-range-gone sub-status: the range was split or merged.
public static boolean isPartitionSplitOrMerge(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE);
}
// Gone with the stale-name-cache sub-status: cached addresses/names need a refresh.
public static boolean isNameCacheStale(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE);
}
// 429 raised specifically by throughput control (not by the service itself).
public static boolean isThroughputControlRequestRateTooLargeException(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.TOO_MANY_REQUESTS)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.THROUGHPUT_CONTROL_REQUEST_RATE_TOO_LARGE);
}
// Gone while a split or merge is still completing on the partition.
public static boolean isPartitionCompletingSplittingException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.GONE) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.COMPLETING_SPLIT_OR_MERGE);
}
// Not Found with the partition-key-mismatch sub-status.
public static boolean isPartitionKeyMismatchException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.NOTFOUND) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH);
}
} | class Exceptions {
public static boolean isStatusCode(CosmosException e, int status) {
return status == e.getStatusCode();
}
public static boolean isSubStatusCode(CosmosException e, int subStatus) {
return subStatus == e.getSubStatusCode();
}
public static boolean isGone(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE);
}
public static boolean isConflict(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.CONFLICT);
}
public static boolean isNotFound(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.NOTFOUND);
}
public static boolean isPartitionSplitOrMerge(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE);
}
public static boolean isNameCacheStale(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE);
}
public static boolean isThroughputControlRequestRateTooLargeException(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.TOO_MANY_REQUESTS)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.THROUGHPUT_CONTROL_REQUEST_RATE_TOO_LARGE);
}
public static boolean isPartitionCompletingSplittingException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.GONE) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.COMPLETING_SPLIT_OR_MERGE);
}
public static boolean isPartitionKeyMismatchException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.NOTFOUND) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH);
}
} |
updated | public static String getInternalServerErrorMessage(String prefix) {
return prefix + "-" + RMResources.InternalServerError;
} | return prefix + "-" + RMResources.InternalServerError; | public static String getInternalServerErrorMessage(String prefix) {
return prefix + " - " + RMResources.InternalServerError;
} | class Exceptions {
public static boolean isStatusCode(CosmosException e, int status) {
return status == e.getStatusCode();
}
public static boolean isSubStatusCode(CosmosException e, int subStatus) {
return subStatus == e.getSubStatusCode();
}
public static boolean isGone(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE);
}
public static boolean isConflict(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.CONFLICT);
}
public static boolean isNotFound(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.NOTFOUND);
}
public static boolean isPartitionSplitOrMerge(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE);
}
public static boolean isNameCacheStale(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE);
}
public static boolean isThroughputControlRequestRateTooLargeException(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.TOO_MANY_REQUESTS)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.THROUGHPUT_CONTROL_REQUEST_RATE_TOO_LARGE);
}
public static boolean isPartitionCompletingSplittingException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.GONE) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.COMPLETING_SPLIT_OR_MERGE);
}
public static boolean isPartitionKeyMismatchException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.NOTFOUND) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH);
}
} | class Exceptions {
public static boolean isStatusCode(CosmosException e, int status) {
return status == e.getStatusCode();
}
public static boolean isSubStatusCode(CosmosException e, int subStatus) {
return subStatus == e.getSubStatusCode();
}
public static boolean isGone(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE);
}
public static boolean isConflict(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.CONFLICT);
}
public static boolean isNotFound(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.NOTFOUND);
}
public static boolean isPartitionSplitOrMerge(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE);
}
public static boolean isNameCacheStale(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.GONE)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE);
}
public static boolean isThroughputControlRequestRateTooLargeException(CosmosException e) {
return isStatusCode(e, HttpConstants.StatusCodes.TOO_MANY_REQUESTS)
&& isSubStatusCode(e, HttpConstants.SubStatusCodes.THROUGHPUT_CONTROL_REQUEST_RATE_TOO_LARGE);
}
public static boolean isPartitionCompletingSplittingException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.GONE) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.COMPLETING_SPLIT_OR_MERGE);
}
public static boolean isPartitionKeyMismatchException(CosmosException cosmosException) {
return Exceptions.isStatusCode(cosmosException, HttpConstants.StatusCodes.NOTFOUND) &&
Exceptions.isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH);
}
} |
Do we need to sort this `srvRecords`? Is there no priority order for the user-defined endpoints array in the yaml file? If we sort this array, it will no longer follow the order that the user defined. | public void updateAutoFailoverEndpoints() {
if (semaphore.tryAcquire()) {
for (ConfigStore configStore : properties.getStores()) {
if (!configStore.isEnabled()) {
continue;
}
String mainEndpoint = configStore.getEndpoint();
List<String> providedEndpoints = new ArrayList<>();
if (configStore.getConnectionStrings().size() > 0) {
providedEndpoints = configStore.getConnectionStrings().stream().map(connectionString -> {
return (AppConfigurationReplicaClientsBuilder
.getEndpointFromConnectionString(connectionString));
}).toList();
} else if (configStore.getEndpoints().size() > 0) {
providedEndpoints = configStore.getEndpoints();
} else {
providedEndpoints = List.of(configStore.getEndpoint());
}
try {
List<SRVRecord> srvRecords = findAutoFailoverEndpoints(mainEndpoint, providedEndpoints);
srvRecords.sort((SRVRecord a, SRVRecord b) -> a.compareTo(b));
records.put(mainEndpoint, srvRecords);
wait.put(mainEndpoint, Instant.now().plus(FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL));
} catch (AppConfigurationReplicaException e) {
wait.put(mainEndpoint, Instant.now().plus(MINIMAL_CLIENT_REFRESH_INTERVAL));
}
}
semaphore.release();
}
} | srvRecords.sort((SRVRecord a, SRVRecord b) -> a.compareTo(b)); | public void updateAutoFailoverEndpoints() {
if (semaphore.tryAcquire()) {
for (ConfigStore configStore : properties.getStores()) {
if (!configStore.isEnabled()) {
continue;
}
String mainEndpoint = configStore.getEndpoint();
List<String> providedEndpoints = new ArrayList<>();
if (configStore.getConnectionStrings().size() > 0) {
providedEndpoints = configStore.getConnectionStrings().stream().map(connectionString -> {
return (AppConfigurationReplicaClientsBuilder
.getEndpointFromConnectionString(connectionString));
}).toList();
} else if (configStore.getEndpoints().size() > 0) {
providedEndpoints = configStore.getEndpoints();
} else {
providedEndpoints = List.of(configStore.getEndpoint());
}
try {
List<SRVRecord> srvRecords = findAutoFailoverEndpoints(mainEndpoint, providedEndpoints);
srvRecords.sort((SRVRecord a, SRVRecord b) -> a.compareTo(b));
records.put(mainEndpoint, srvRecords);
wait.put(mainEndpoint, Instant.now().plus(FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL));
} catch (AppConfigurationReplicaException e) {
wait.put(mainEndpoint, Instant.now().plus(MINIMAL_CLIENT_REFRESH_INTERVAL));
}
}
semaphore.release();
}
} | class ReplicaLookUp {
private static final String ORIGIN_PREFIX = "dns:/_origin._tcp.";
private static final String REPLICA_PREFIX_ALT = "dns:/_alt";
private static final String REPLICA_PREFIX_TCP = "._tcp.";
private static final String SRC_RECORD = "SRV";
private static final String[] TRUSTED_DOMAIN_LABELS = { "azconfig", "appconfig" };
private static final Duration FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL = Duration.ofHours(1);
private static final Duration MINIMAL_CLIENT_REFRESH_INTERVAL = Duration.ofSeconds(30);
InitialDirContext context;
private Map<String, List<SRVRecord>> records = new HashMap<String, List<SRVRecord>>();
private Map<String, Instant> wait = new HashMap<>();
private final AppConfigurationProperties properties;
private final Semaphore semaphore;
public ReplicaLookUp(AppConfigurationProperties properties) throws NamingException {
this.properties = properties;
this.context = new InitialDirContext();
this.semaphore = new Semaphore(1);
}
@Async
public List<String> getAutoFailoverEndpoints(String mainEndpoint) {
List<SRVRecord> endpointRecords = records.get(mainEndpoint);
if (endpointRecords == null) {
return List.of();
}
return endpointRecords.stream().map(record -> record.getEndpoint()).toList();
}
private List<SRVRecord> findAutoFailoverEndpoints(String endpoint, List<String> providedEndpoints)
throws AppConfigurationReplicaException {
List<SRVRecord> records = new ArrayList<>();
String host = "";
try {
URI uri = new URI(endpoint);
host = uri.getHost();
} catch (URISyntaxException e) {
return new ArrayList<>();
}
SRVRecord origin = getOriginRecord(host);
if (origin != null) {
List<SRVRecord> replicas = getReplicaRecords(origin);
String knownDomain = getKnownDomain(endpoint);
if (!providedEndpoints.contains(origin.getEndpoint()) && validate(knownDomain, origin.getEndpoint())) {
records.add(origin);
}
replicas.stream().forEach(replica -> {
if (!providedEndpoints.contains(replica.getEndpoint())
&& validate(knownDomain, replica.getEndpoint())) {
records.add(replica);
}
});
}
return records;
}
private SRVRecord getOriginRecord(String url) throws AppConfigurationReplicaException {
Attribute attribute = requestRecord(ORIGIN_PREFIX + url);
if (attribute != null) {
return parseHosts(attribute).get(0);
}
return null;
}
private List<SRVRecord> getReplicaRecords(SRVRecord origin) throws AppConfigurationReplicaException {
List<SRVRecord> replicas = new ArrayList<>();
int i = 0;
while (true) {
Attribute attribute = requestRecord(
REPLICA_PREFIX_ALT + i + REPLICA_PREFIX_TCP + origin.getTarget());
if (attribute == null) {
break;
}
replicas.addAll(parseHosts(attribute));
i++;
}
return replicas;
}
private Attribute requestRecord(String name) throws AppConfigurationReplicaException {
Instant retryTime = Instant.now().plusSeconds(30);
while (retryTime.isAfter(Instant.now())) {
try {
return context.getAttributes(name, new String[] { SRC_RECORD }).get(SRC_RECORD);
} catch (NameNotFoundException e) {
return null;
} catch (NamingException e) {
}
}
throw new AppConfigurationReplicaException();
}
private List<SRVRecord> parseHosts(Attribute attribute) {
List<SRVRecord> hosts = new ArrayList<>();
try {
NamingEnumeration<?> records = attribute.getAll();
while (records.hasMore()) {
hosts.add(new SRVRecord(((String) records.next()).toString().split(" ")));
}
} catch (NamingException e) {
}
return hosts;
}
private boolean validate(String knownDomain, String endpoint) {
if (!StringUtils.hasText(endpoint)) {
return false;
}
if (!StringUtils.hasText(knownDomain)) {
return false;
}
return endpoint.endsWith(knownDomain);
}
private String getKnownDomain(String knownHost) {
for (String label : TRUSTED_DOMAIN_LABELS) {
int index = knownHost.toLowerCase().indexOf("." + label + ".");
if (index > 0) {
return knownHost.substring(index);
}
}
return "";
}
private class AppConfigurationReplicaException extends Exception {
/**
*
*/
private static final long serialVersionUID = 1L;
}
} | class ReplicaLookUp {
private static final String ORIGIN_PREFIX = "dns:/_origin._tcp.";
private static final String REPLICA_PREFIX_ALT = "dns:/_alt";
private static final String REPLICA_PREFIX_TCP = "._tcp.";
private static final String SRC_RECORD = "SRV";
private static final String[] TRUSTED_DOMAIN_LABELS = { "azconfig", "appconfig" };
private static final Duration FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL = Duration.ofHours(1);
private static final Duration MINIMAL_CLIENT_REFRESH_INTERVAL = Duration.ofSeconds(30);
InitialDirContext context;
private Map<String, List<SRVRecord>> records = new HashMap<String, List<SRVRecord>>();
private Map<String, Instant> wait = new HashMap<>();
private final AppConfigurationProperties properties;
private final Semaphore semaphore;
public ReplicaLookUp(AppConfigurationProperties properties) throws NamingException {
this.properties = properties;
this.context = new InitialDirContext();
this.semaphore = new Semaphore(1);
}
@Async
public List<String> getAutoFailoverEndpoints(String mainEndpoint) {
List<SRVRecord> endpointRecords = records.get(mainEndpoint);
if (endpointRecords == null) {
return List.of();
}
return endpointRecords.stream().map(record -> record.getEndpoint()).toList();
}
private List<SRVRecord> findAutoFailoverEndpoints(String endpoint, List<String> providedEndpoints)
throws AppConfigurationReplicaException {
List<SRVRecord> records = new ArrayList<>();
String host = "";
try {
URI uri = new URI(endpoint);
host = uri.getHost();
} catch (URISyntaxException e) {
return new ArrayList<>();
}
SRVRecord origin = getOriginRecord(host);
if (origin != null) {
List<SRVRecord> replicas = getReplicaRecords(origin);
String knownDomain = getKnownDomain(endpoint);
if (!providedEndpoints.contains(origin.getEndpoint()) && validate(knownDomain, origin.getEndpoint())) {
records.add(origin);
}
replicas.stream().forEach(replica -> {
if (!providedEndpoints.contains(replica.getEndpoint())
&& validate(knownDomain, replica.getEndpoint())) {
records.add(replica);
}
});
}
return records;
}
private SRVRecord getOriginRecord(String url) throws AppConfigurationReplicaException {
Attribute attribute = requestRecord(ORIGIN_PREFIX + url);
if (attribute != null) {
return parseHosts(attribute).get(0);
}
return null;
}
private List<SRVRecord> getReplicaRecords(SRVRecord origin) throws AppConfigurationReplicaException {
List<SRVRecord> replicas = new ArrayList<>();
int i = 0;
while (true) {
Attribute attribute = requestRecord(
REPLICA_PREFIX_ALT + i + REPLICA_PREFIX_TCP + origin.getTarget());
if (attribute == null) {
break;
}
replicas.addAll(parseHosts(attribute));
i++;
}
return replicas;
}
private Attribute requestRecord(String name) throws AppConfigurationReplicaException {
Instant retryTime = Instant.now().plusSeconds(30);
while (retryTime.isAfter(Instant.now())) {
try {
return context.getAttributes(name, new String[] { SRC_RECORD }).get(SRC_RECORD);
} catch (NameNotFoundException e) {
return null;
} catch (NamingException e) {
}
}
throw new AppConfigurationReplicaException();
}
private List<SRVRecord> parseHosts(Attribute attribute) {
List<SRVRecord> hosts = new ArrayList<>();
try {
NamingEnumeration<?> records = attribute.getAll();
while (records.hasMore()) {
hosts.add(new SRVRecord(((String) records.next()).toString().split(" ")));
}
} catch (NamingException e) {
}
return hosts;
}
private boolean validate(String knownDomain, String endpoint) {
if (!StringUtils.hasText(endpoint)) {
return false;
}
if (!StringUtils.hasText(knownDomain)) {
return false;
}
return endpoint.endsWith(knownDomain);
}
private String getKnownDomain(String knownHost) {
for (String label : TRUSTED_DOMAIN_LABELS) {
int index = knownHost.toLowerCase().indexOf("." + label + ".");
if (index > 0) {
return knownHost.substring(index);
}
}
return "";
}
private class AppConfigurationReplicaException extends Exception {
/**
*
*/
private static final long serialVersionUID = 1L;
}
} |
This came up in the design review. We plan on supporting the SRV priority and weight values. When a user defines multiple stores, we follow the list they provide, top down, where the top option has the highest priority. | public void updateAutoFailoverEndpoints() {
if (semaphore.tryAcquire()) {
for (ConfigStore configStore : properties.getStores()) {
if (!configStore.isEnabled()) {
continue;
}
String mainEndpoint = configStore.getEndpoint();
List<String> providedEndpoints = new ArrayList<>();
if (configStore.getConnectionStrings().size() > 0) {
providedEndpoints = configStore.getConnectionStrings().stream().map(connectionString -> {
return (AppConfigurationReplicaClientsBuilder
.getEndpointFromConnectionString(connectionString));
}).toList();
} else if (configStore.getEndpoints().size() > 0) {
providedEndpoints = configStore.getEndpoints();
} else {
providedEndpoints = List.of(configStore.getEndpoint());
}
try {
List<SRVRecord> srvRecords = findAutoFailoverEndpoints(mainEndpoint, providedEndpoints);
srvRecords.sort((SRVRecord a, SRVRecord b) -> a.compareTo(b));
records.put(mainEndpoint, srvRecords);
wait.put(mainEndpoint, Instant.now().plus(FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL));
} catch (AppConfigurationReplicaException e) {
wait.put(mainEndpoint, Instant.now().plus(MINIMAL_CLIENT_REFRESH_INTERVAL));
}
}
semaphore.release();
}
} | srvRecords.sort((SRVRecord a, SRVRecord b) -> a.compareTo(b)); | public void updateAutoFailoverEndpoints() {
if (semaphore.tryAcquire()) {
for (ConfigStore configStore : properties.getStores()) {
if (!configStore.isEnabled()) {
continue;
}
String mainEndpoint = configStore.getEndpoint();
List<String> providedEndpoints = new ArrayList<>();
if (configStore.getConnectionStrings().size() > 0) {
providedEndpoints = configStore.getConnectionStrings().stream().map(connectionString -> {
return (AppConfigurationReplicaClientsBuilder
.getEndpointFromConnectionString(connectionString));
}).toList();
} else if (configStore.getEndpoints().size() > 0) {
providedEndpoints = configStore.getEndpoints();
} else {
providedEndpoints = List.of(configStore.getEndpoint());
}
try {
List<SRVRecord> srvRecords = findAutoFailoverEndpoints(mainEndpoint, providedEndpoints);
srvRecords.sort((SRVRecord a, SRVRecord b) -> a.compareTo(b));
records.put(mainEndpoint, srvRecords);
wait.put(mainEndpoint, Instant.now().plus(FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL));
} catch (AppConfigurationReplicaException e) {
wait.put(mainEndpoint, Instant.now().plus(MINIMAL_CLIENT_REFRESH_INTERVAL));
}
}
semaphore.release();
}
} | class ReplicaLookUp {
private static final String ORIGIN_PREFIX = "dns:/_origin._tcp.";
private static final String REPLICA_PREFIX_ALT = "dns:/_alt";
private static final String REPLICA_PREFIX_TCP = "._tcp.";
private static final String SRC_RECORD = "SRV";
private static final String[] TRUSTED_DOMAIN_LABELS = { "azconfig", "appconfig" };
private static final Duration FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL = Duration.ofHours(1);
private static final Duration MINIMAL_CLIENT_REFRESH_INTERVAL = Duration.ofSeconds(30);
InitialDirContext context;
private Map<String, List<SRVRecord>> records = new HashMap<String, List<SRVRecord>>();
private Map<String, Instant> wait = new HashMap<>();
private final AppConfigurationProperties properties;
private final Semaphore semaphore;
public ReplicaLookUp(AppConfigurationProperties properties) throws NamingException {
this.properties = properties;
this.context = new InitialDirContext();
this.semaphore = new Semaphore(1);
}
@Async
public List<String> getAutoFailoverEndpoints(String mainEndpoint) {
List<SRVRecord> endpointRecords = records.get(mainEndpoint);
if (endpointRecords == null) {
return List.of();
}
return endpointRecords.stream().map(record -> record.getEndpoint()).toList();
}
private List<SRVRecord> findAutoFailoverEndpoints(String endpoint, List<String> providedEndpoints)
throws AppConfigurationReplicaException {
List<SRVRecord> records = new ArrayList<>();
String host = "";
try {
URI uri = new URI(endpoint);
host = uri.getHost();
} catch (URISyntaxException e) {
return new ArrayList<>();
}
SRVRecord origin = getOriginRecord(host);
if (origin != null) {
List<SRVRecord> replicas = getReplicaRecords(origin);
String knownDomain = getKnownDomain(endpoint);
if (!providedEndpoints.contains(origin.getEndpoint()) && validate(knownDomain, origin.getEndpoint())) {
records.add(origin);
}
replicas.stream().forEach(replica -> {
if (!providedEndpoints.contains(replica.getEndpoint())
&& validate(knownDomain, replica.getEndpoint())) {
records.add(replica);
}
});
}
return records;
}
private SRVRecord getOriginRecord(String url) throws AppConfigurationReplicaException {
Attribute attribute = requestRecord(ORIGIN_PREFIX + url);
if (attribute != null) {
return parseHosts(attribute).get(0);
}
return null;
}
private List<SRVRecord> getReplicaRecords(SRVRecord origin) throws AppConfigurationReplicaException {
List<SRVRecord> replicas = new ArrayList<>();
int i = 0;
while (true) {
Attribute attribute = requestRecord(
REPLICA_PREFIX_ALT + i + REPLICA_PREFIX_TCP + origin.getTarget());
if (attribute == null) {
break;
}
replicas.addAll(parseHosts(attribute));
i++;
}
return replicas;
}
private Attribute requestRecord(String name) throws AppConfigurationReplicaException {
Instant retryTime = Instant.now().plusSeconds(30);
while (retryTime.isAfter(Instant.now())) {
try {
return context.getAttributes(name, new String[] { SRC_RECORD }).get(SRC_RECORD);
} catch (NameNotFoundException e) {
return null;
} catch (NamingException e) {
}
}
throw new AppConfigurationReplicaException();
}
private List<SRVRecord> parseHosts(Attribute attribute) {
List<SRVRecord> hosts = new ArrayList<>();
try {
NamingEnumeration<?> records = attribute.getAll();
while (records.hasMore()) {
hosts.add(new SRVRecord(((String) records.next()).toString().split(" ")));
}
} catch (NamingException e) {
}
return hosts;
}
private boolean validate(String knownDomain, String endpoint) {
if (!StringUtils.hasText(endpoint)) {
return false;
}
if (!StringUtils.hasText(knownDomain)) {
return false;
}
return endpoint.endsWith(knownDomain);
}
private String getKnownDomain(String knownHost) {
for (String label : TRUSTED_DOMAIN_LABELS) {
int index = knownHost.toLowerCase().indexOf("." + label + ".");
if (index > 0) {
return knownHost.substring(index);
}
}
return "";
}
private class AppConfigurationReplicaException extends Exception {
/**
*
*/
private static final long serialVersionUID = 1L;
}
} | class ReplicaLookUp {
// JNDI query prefix for a store's origin SRV record (_origin._tcp.<host>).
private static final String ORIGIN_PREFIX = "dns:/_origin._tcp.";
// Replica SRV records are probed as _alt<i>._tcp.<origin-target> for i = 0, 1, ...
private static final String REPLICA_PREFIX_ALT = "dns:/_alt";
private static final String REPLICA_PREFIX_TCP = "._tcp.";
// DNS record type requested through JNDI.
private static final String SRC_RECORD = "SRV";
// Only endpoints under these domain labels are accepted as auto-failover replicas.
private static final String[] TRUSTED_DOMAIN_LABELS = { "azconfig", "appconfig" };
// How long a successful lookup result is considered fresh before re-querying.
private static final Duration FALLBACK_CLIENT_REFRESH_EXPIRED_INTERVAL = Duration.ofHours(1);
// Shorter retry interval used after a failed lookup.
private static final Duration MINIMAL_CLIENT_REFRESH_INTERVAL = Duration.ofSeconds(30);
// JNDI directory context used for the DNS SRV queries.
InitialDirContext context;
// Discovered SRV records, keyed by the store's main endpoint.
private Map<String, List<SRVRecord>> records = new HashMap<String, List<SRVRecord>>();
// Earliest time each store's records may be refreshed again, keyed by endpoint.
private Map<String, Instant> wait = new HashMap<>();
private final AppConfigurationProperties properties;
// Ensures only one refresh pass runs at a time.
private final Semaphore semaphore;
/**
 * Creates a replica look-up helper backed by a fresh JNDI directory context.
 *
 * @param properties the configured App Configuration stores
 * @throws NamingException if the JNDI directory context cannot be created
 */
public ReplicaLookUp(AppConfigurationProperties properties) throws NamingException {
    this.semaphore = new Semaphore(1);
    this.properties = properties;
    this.context = new InitialDirContext();
}
/**
 * Returns the auto-failover endpoints previously discovered for {@code mainEndpoint},
 * or an empty list when no SRV records have been recorded for it.
 *
 * @param mainEndpoint the store's primary endpoint used as the lookup key
 * @return endpoints extracted from the cached SRV records; never null
 */
// NOTE(review): @Async on a non-void, non-Future method is suspicious — when this
// is invoked through Spring's async proxy the return value is discarded and the
// caller sees null. Confirm whether the annotation belongs on the refresh method
// (updateAutoFailoverEndpoints) instead.
@Async
public List<String> getAutoFailoverEndpoints(String mainEndpoint) {
List<SRVRecord> endpointRecords = records.get(mainEndpoint);
if (endpointRecords == null) {
return List.of();
}
return endpointRecords.stream().map(record -> record.getEndpoint()).toList();
}
/**
 * Resolves SRV records for {@code endpoint} and returns the replicas that are
 * inside a trusted domain and not already configured by the user.
 *
 * @param endpoint the store's main endpoint URI
 * @param providedEndpoints endpoints the user already configured; excluded from the result
 * @return discovered failover records; empty when the URI is invalid or no origin record exists
 * @throws AppConfigurationReplicaException when the DNS lookup keeps failing
 */
private List<SRVRecord> findAutoFailoverEndpoints(String endpoint, List<String> providedEndpoints)
    throws AppConfigurationReplicaException {
    List<SRVRecord> discovered = new ArrayList<>();
    String host;
    try {
        host = new URI(endpoint).getHost();
    } catch (URISyntaxException e) {
        // Malformed endpoint: nothing to look up.
        return new ArrayList<>();
    }
    SRVRecord origin = getOriginRecord(host);
    if (origin == null) {
        return discovered;
    }
    String knownDomain = getKnownDomain(endpoint);
    if (validate(knownDomain, origin.getEndpoint()) && !providedEndpoints.contains(origin.getEndpoint())) {
        discovered.add(origin);
    }
    for (SRVRecord replica : getReplicaRecords(origin)) {
        if (validate(knownDomain, replica.getEndpoint()) && !providedEndpoints.contains(replica.getEndpoint())) {
            discovered.add(replica);
        }
    }
    return discovered;
}
/**
 * Fetches the origin SRV record for the given host.
 *
 * @param url the host name to query under the origin prefix
 * @return the first origin record, or null when no record (or no parsable record) exists
 * @throws AppConfigurationReplicaException when the DNS lookup keeps failing
 */
private SRVRecord getOriginRecord(String url) throws AppConfigurationReplicaException {
    Attribute attribute = requestRecord(ORIGIN_PREFIX + url);
    if (attribute != null) {
        List<SRVRecord> hosts = parseHosts(attribute);
        // parseHosts is best-effort and may return an empty list (it swallows
        // NamingException); guard get(0) to avoid IndexOutOfBoundsException.
        if (!hosts.isEmpty()) {
            return hosts.get(0);
        }
    }
    return null;
}
/**
 * Probes _alt0, _alt1, ... SRV names under the origin's target, collecting every
 * replica host found, and stops at the first missing name.
 *
 * @param origin the origin record whose target anchors the replica names
 * @return all replica records discovered; possibly empty
 * @throws AppConfigurationReplicaException when a DNS lookup keeps failing
 */
private List<SRVRecord> getReplicaRecords(SRVRecord origin) throws AppConfigurationReplicaException {
    List<SRVRecord> replicas = new ArrayList<>();
    for (int index = 0;; index++) {
        Attribute attribute = requestRecord(REPLICA_PREFIX_ALT + index + REPLICA_PREFIX_TCP + origin.getTarget());
        if (attribute == null) {
            return replicas;
        }
        replicas.addAll(parseHosts(attribute));
    }
}
/**
 * Queries JNDI for the SRV attribute of {@code name}, retrying transient
 * failures for up to 30 seconds.
 *
 * @param name the JNDI/DNS name to resolve
 * @return the SRV attribute, or null when the name does not exist
 * @throws AppConfigurationReplicaException when lookups keep failing past the deadline
 */
private Attribute requestRecord(String name) throws AppConfigurationReplicaException {
    Instant deadline = Instant.now().plusSeconds(30);
    while (deadline.isAfter(Instant.now())) {
        try {
            return context.getAttributes(name, new String[] { SRC_RECORD }).get(SRC_RECORD);
        } catch (NameNotFoundException e) {
            // The name simply doesn't exist — expected when probing replica indexes.
            return null;
        } catch (NamingException e) {
            // Transient resolution failure: back off briefly instead of
            // busy-spinning at full CPU for the rest of the 30s window.
            try {
                Thread.sleep(100);
            } catch (InterruptedException interrupted) {
                Thread.currentThread().interrupt();
                throw new AppConfigurationReplicaException();
            }
        }
    }
    throw new AppConfigurationReplicaException();
}
/**
 * Parses every value of the SRV attribute into SRVRecord instances.
 * Best-effort: if the naming enumeration fails midway, the records parsed so
 * far (possibly none) are returned instead of propagating the error.
 *
 * @param attribute the SRV attribute whose values are space-separated record fields
 * @return the parsed records; possibly empty, never null
 */
private List<SRVRecord> parseHosts(Attribute attribute) {
    List<SRVRecord> hosts = new ArrayList<>();
    try {
        NamingEnumeration<?> values = attribute.getAll();
        while (values.hasMore()) {
            // Each value is split on spaces and handed to SRVRecord. The cast
            // already yields a String, so the previous .toString() was redundant.
            hosts.add(new SRVRecord(((String) values.next()).split(" ")));
        }
    } catch (NamingException e) {
        // Intentionally swallowed: replica discovery is best-effort and callers
        // treat a short or empty list as "no replicas found".
    }
    return hosts;
}
/**
 * An endpoint is accepted only when both it and the trusted domain are
 * non-blank and the endpoint lives under that domain.
 *
 * @param knownDomain the trusted domain suffix derived from the main endpoint
 * @param endpoint the candidate replica endpoint
 * @return true when the endpoint ends with the trusted domain
 */
private boolean validate(String knownDomain, String endpoint) {
    return StringUtils.hasText(endpoint)
        && StringUtils.hasText(knownDomain)
        && endpoint.endsWith(knownDomain);
}
/**
 * Extracts the trusted domain suffix (starting at ".azconfig." or ".appconfig.")
 * from the given host, or returns "" when no trusted label is present.
 *
 * @param knownHost the host string to inspect
 * @return the domain suffix from the original (non-lowercased) string, or ""
 */
private String getKnownDomain(String knownHost) {
    // Hoist the lowercasing out of the loop (it is loop-invariant) and use
    // Locale.ROOT so locale-specific case mappings (e.g. Turkish dotless i)
    // cannot break the label match.
    String lowerHost = knownHost.toLowerCase(java.util.Locale.ROOT);
    for (String label : TRUSTED_DOMAIN_LABELS) {
        int index = lowerHost.indexOf("." + label + ".");
        if (index > 0) {
            // Substring from the original string so the caller's casing is kept;
            // indexes match because lowercasing preserves length here.
            return knownHost.substring(index);
        }
    }
    return "";
}
private class AppConfigurationReplicaException extends Exception {
/**
*
*/
private static final long serialVersionUID = 1L;
}
} |
Should we log the exception here? | public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) {
if (executorService == null) {
return null;
}
Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null.");
if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) {
throw new IllegalArgumentException("'shutdownTimeout' must be a non-zero positive duration.");
}
long timeoutNanos = shutdownTimeout.toNanos();
Thread shutdownThread = new Thread(() -> {
try {
executorService.shutdown();
if (!executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS)) {
executorService.shutdownNow();
executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
executorService.shutdown();
}
});
if (shutdownHookAccessHelper) {
java.security.AccessController.doPrivileged((java.security.PrivilegedAction<Void>) () -> {
Runtime.getRuntime().addShutdownHook(shutdownThread);
return null;
});
} else {
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
return executorService;
} | Thread.currentThread().interrupt(); | public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) {
if (executorService == null) {
return null;
}
Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null.");
if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) {
throw new IllegalArgumentException("'shutdownTimeout' must be a non-zero positive duration.");
}
long timeoutNanos = shutdownTimeout.toNanos();
Thread shutdownThread = new Thread(() -> {
try {
executorService.shutdown();
if (!executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS)) {
executorService.shutdownNow();
executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
executorService.shutdown();
}
});
if (ShutdownHookAccessHelperHolder.shutdownHookAccessHelper) {
java.security.AccessController.doPrivileged((java.security.PrivilegedAction<Void>) () -> {
Runtime.getRuntime().addShutdownHook(shutdownThread);
return null;
});
} else {
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
return executorService;
} | class from an array of Objects.
*
* @param args Array of objects to search through to find the first instance of the given `clazz` type.
* @param clazz The type trying to be found.
* @param <T> Generic type
* @return The first object of the desired type, otherwise null.
*/
public static <T> T findFirstOfType(Object[] args, Class<T> clazz) {
if (isNullOrEmpty(args)) {
return null;
}
for (Object arg : args) {
if (clazz.isInstance(arg)) {
return clazz.cast(arg);
}
}
return null;
} | class from an array of Objects.
*
* @param args Array of objects to search through to find the first instance of the given `clazz` type.
* @param clazz The type trying to be found.
* @param <T> Generic type
* @return The first object of the desired type, otherwise null.
*/
public static <T> T findFirstOfType(Object[] args, Class<T> clazz) {
if (isNullOrEmpty(args)) {
return null;
}
for (Object arg : args) {
if (clazz.isInstance(arg)) {
return clazz.cast(arg);
}
}
return null;
} |
We don't currently do this anywhere; given this is happening during shutdown, maybe it's best we don't? | public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) {
if (executorService == null) {
return null;
}
Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null.");
if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) {
throw new IllegalArgumentException("'shutdownTimeout' must be a non-zero positive duration.");
}
long timeoutNanos = shutdownTimeout.toNanos();
Thread shutdownThread = new Thread(() -> {
try {
executorService.shutdown();
if (!executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS)) {
executorService.shutdownNow();
executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
executorService.shutdown();
}
});
if (shutdownHookAccessHelper) {
java.security.AccessController.doPrivileged((java.security.PrivilegedAction<Void>) () -> {
Runtime.getRuntime().addShutdownHook(shutdownThread);
return null;
});
} else {
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
return executorService;
} | Thread.currentThread().interrupt(); | public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) {
if (executorService == null) {
return null;
}
Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null.");
if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) {
throw new IllegalArgumentException("'shutdownTimeout' must be a non-zero positive duration.");
}
long timeoutNanos = shutdownTimeout.toNanos();
Thread shutdownThread = new Thread(() -> {
try {
executorService.shutdown();
if (!executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS)) {
executorService.shutdownNow();
executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
executorService.shutdown();
}
});
if (ShutdownHookAccessHelperHolder.shutdownHookAccessHelper) {
java.security.AccessController.doPrivileged((java.security.PrivilegedAction<Void>) () -> {
Runtime.getRuntime().addShutdownHook(shutdownThread);
return null;
});
} else {
Runtime.getRuntime().addShutdownHook(shutdownThread);
}
return executorService;
} | class from an array of Objects.
*
* @param args Array of objects to search through to find the first instance of the given `clazz` type.
* @param clazz The type trying to be found.
* @param <T> Generic type
* @return The first object of the desired type, otherwise null.
*/
public static <T> T findFirstOfType(Object[] args, Class<T> clazz) {
if (isNullOrEmpty(args)) {
return null;
}
for (Object arg : args) {
if (clazz.isInstance(arg)) {
return clazz.cast(arg);
}
}
return null;
} | class from an array of Objects.
*
* @param args Array of objects to search through to find the first instance of the given `clazz` type.
* @param clazz The type trying to be found.
* @param <T> Generic type
* @return The first object of the desired type, otherwise null.
*/
public static <T> T findFirstOfType(Object[] args, Class<T> clazz) {
if (isNullOrEmpty(args)) {
return null;
}
for (Object arg : args) {
if (clazz.isInstance(arg)) {
return clazz.cast(arg);
}
}
return null;
} |
nit: fix comment | public OkHttpAsyncHttpClientBuilder responseTimeout(Duration responseTimeout) {
this.responseTimeout = responseTimeout;
return this;
} | public OkHttpAsyncHttpClientBuilder responseTimeout(Duration responseTimeout) {
this.responseTimeout = responseTimeout;
return this;
} | class OkHttpAsyncHttpClientBuilder {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClientBuilder.class);
private final okhttp3.OkHttpClient okHttpClient;
private static final Duration MINIMUM_TIMEOUT = Duration.ofMillis(1);
private static final Duration DEFAULT_CONNECT_TIMEOUT;
private static final Duration DEFAULT_WRITE_TIMEOUT;
private static final Duration DEFAULT_RESPONSE_TIMEOUT;
private static final Duration DEFAULT_READ_TIMEOUT;
static {
ClientLogger logger = new ClientLogger(OkHttpAsyncHttpClientBuilder.class);
Configuration configuration = Configuration.getGlobalConfiguration();
DEFAULT_CONNECT_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration,
PROPERTY_AZURE_REQUEST_CONNECT_TIMEOUT, Duration.ofSeconds(10), logger);
DEFAULT_WRITE_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration, PROPERTY_AZURE_REQUEST_WRITE_TIMEOUT,
Duration.ofSeconds(60), logger);
DEFAULT_RESPONSE_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration,
PROPERTY_AZURE_REQUEST_RESPONSE_TIMEOUT, Duration.ofSeconds(60), logger);
DEFAULT_READ_TIMEOUT = getDefaultTimeoutFromEnvironment(configuration, PROPERTY_AZURE_REQUEST_READ_TIMEOUT,
Duration.ofSeconds(60), logger);
}
private List<Interceptor> networkInterceptors = new ArrayList<>();
private Duration readTimeout;
private Duration responseTimeout;
private Duration writeTimeout;
private Duration connectionTimeout;
private Duration callTimeout;
private ConnectionPool connectionPool;
private Dispatcher dispatcher;
private ProxyOptions proxyOptions;
private Configuration configuration;
private boolean followRedirects;
/**
* Creates OkHttpAsyncHttpClientBuilder.
*/
public OkHttpAsyncHttpClientBuilder() {
this.okHttpClient = null;
}
/**
* Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient.
*
* @param okHttpClient the httpclient
*/
public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) {
this.okHttpClient = Objects.requireNonNull(okHttpClient, "'okHttpClient' cannot be null.");
}
/**
* Add a network layer interceptor to Http request pipeline.
*
* @param networkInterceptor the interceptor to add
* @return the updated OkHttpAsyncHttpClientBuilder object
*/
public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) {
Objects.requireNonNull(networkInterceptor, "'networkInterceptor' cannot be null.");
this.networkInterceptors.add(networkInterceptor);
return this;
}
/**
* Add network layer interceptors to Http request pipeline.
* <p>
* This replaces all previously-set interceptors.
*
* @param networkInterceptors The interceptors to add.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) {
this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "'networkInterceptors' cannot be null.");
return this;
}
/**
* Sets the read timeout duration used when reading the server response.
* <p>
* The read timeout begins once the first response read is triggered after the server response is received. This
* timeout triggers periodically but won't fire its operation if another read operation has completed between when
* the timeout is triggered and completes.
* <p>
* If {@code readTimeout} is null or {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout period will be
* applied to response read. When applying the timeout the greatest of one millisecond and the value of {@code
* readTimeout} will be used.
*
* @param readTimeout Read timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) {
this.readTimeout = readTimeout;
return this;
}
/**
* Sets the response timeout duration used when waiting for a server to reply.
* <p>
* The response timeout begins once the request write completes and finishes once the first response read is
* triggered when the server response is received.
* <p>
* If {@code responseTimeout} is null either {@link Configuration
* 60-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied to the response. When applying the timeout the greatest of one millisecond and the value of {@code
* responseTimeout} will be used.
* <p>
* Given OkHttp doesn't have an equivalent timeout for just responses, this is handled manually.
*
* @param responseTimeout Response timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
/**
* Sets the writing timeout for a request to be sent.
* <p>
* The writing timeout does not apply to the entire request but to the request being sent over the wire. For example
* a request body which emits {@code 10} {@code 8KB} buffers will trigger {@code 10} write operations, the last
* write tracker will update when each operation completes and the outbound buffer will be periodically checked to
* determine if it is still draining.
* <p>
* If {@code writeTimeout} is null either {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no write timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code writeTimeout} will be
* used.
*
* @param writeTimeout Write operation timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder writeTimeout(Duration writeTimeout) {
this.writeTimeout = writeTimeout;
return this;
}
/**
* Sets the connection timeout for a request to be sent.
* <p>
* The connection timeout begins once the request attempts to connect to the remote host and finishes once the
* connection is resolved.
* <p>
* If {@code connectTimeout} is null either {@link Configuration
* 10-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code connectTimeout} will
* be used.
* <p>
* By default, the connection timeout is 10 seconds.
*
* @param connectionTimeout Connect timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) {
this.connectionTimeout = connectionTimeout;
return this;
}
/**
* Sets the default timeout for complete calls.
* <p>
* The call timeout spans the entire call: resolving DNS, connecting, writing the request body,
* server processing, and reading the response body.
* <p>
* Null or {@link Duration
* must be between 1 and {@link Integer
* <p>
* By default, call timeout is not enabled.
*
* @param callTimeout Call timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder callTimeout(Duration callTimeout) {
if (callTimeout != null && callTimeout.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'callTimeout' cannot be negative"));
}
this.callTimeout = callTimeout;
return this;
}
/**
* Sets the Http connection pool.
*
* @param connectionPool The OkHttp connection pool to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) {
this.connectionPool = Objects.requireNonNull(connectionPool, "'connectionPool' cannot be null.");
return this;
}
/**
* Sets the dispatcher that also composes the thread pool for executing HTTP requests.
*
* @param dispatcher The dispatcher to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) {
this.dispatcher = Objects.requireNonNull(dispatcher, "'dispatcher' cannot be null.");
return this;
}
/**
* Sets the proxy.
*
* @param proxyOptions The proxy configuration to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the HTTP client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* <p>Sets the followRedirect flag on the underlying OkHttp-backed {@link com.azure.core.http.HttpClient}.</p>
*
* <p>If this is set to 'true' redirects will be followed automatically, and
* if your HTTP pipeline is configured with a redirect policy it will not be called.</p>
*
* @param followRedirects The followRedirects value to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder followRedirects(boolean followRedirects) {
this.followRedirects = followRedirects;
return this;
}
/**
* Creates a new OkHttp-backed {@link com.azure.core.http.HttpClient} instance on every call, using the
* configuration set in the builder at the time of the build method call.
*
* @return A new OkHttp-backed {@link com.azure.core.http.HttpClient} instance.
*/
public HttpClient build() {
OkHttpClient.Builder httpClientBuilder
= this.okHttpClient == null ? new OkHttpClient.Builder() : this.okHttpClient.newBuilder();
for (Interceptor interceptor : this.networkInterceptors) {
httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor);
}
httpClientBuilder = httpClientBuilder.connectTimeout(getTimeout(connectionTimeout, DEFAULT_CONNECT_TIMEOUT))
.writeTimeout(getTimeout(writeTimeout, DEFAULT_WRITE_TIMEOUT))
.readTimeout(getTimeout(readTimeout, DEFAULT_READ_TIMEOUT));
if (callTimeout != null) {
httpClientBuilder.callTimeout(callTimeout);
}
if (this.connectionPool != null) {
httpClientBuilder = httpClientBuilder.connectionPool(connectionPool);
}
if (this.dispatcher != null) {
httpClientBuilder = httpClientBuilder.dispatcher(dispatcher);
}
Configuration buildConfiguration
= (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
ProxyOptions buildProxyOptions
= (proxyOptions == null) ? ProxyOptions.fromConfiguration(buildConfiguration, true) : proxyOptions;
if (buildProxyOptions != null) {
httpClientBuilder
= httpClientBuilder.proxySelector(new OkHttpProxySelector(buildProxyOptions.getType().toProxyType(),
buildProxyOptions::getAddress, buildProxyOptions.getNonProxyHosts()));
if (buildProxyOptions.getUsername() != null) {
ProxyAuthenticator proxyAuthenticator
= new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword());
httpClientBuilder = httpClientBuilder.proxyAuthenticator(proxyAuthenticator)
.addInterceptor(proxyAuthenticator.getProxyAuthenticationInfoInterceptor());
}
}
httpClientBuilder.followRedirects(this.followRedirects);
return new OkHttpAsyncHttpClient(httpClientBuilder.build(), responseTimeout);
}
/*
* Returns the timeout in milliseconds to use based on the passed Duration and default timeout.
*
* If the timeout is {@code null} the default timeout will be used. If the timeout is less than or equal to zero
* no timeout will be used. If the timeout is less than one millisecond a timeout of one millisecond will be used.
*/
static Duration getTimeout(Duration configuredTimeout, Duration defaultTimeout) {
if (configuredTimeout == null) {
return defaultTimeout;
}
if (configuredTimeout.isZero() || configuredTimeout.isNegative()) {
return Duration.ZERO;
}
if (configuredTimeout.compareTo(MINIMUM_TIMEOUT) < 0) {
return MINIMUM_TIMEOUT;
} else {
return configuredTimeout;
}
}
} | class OkHttpAsyncHttpClientBuilder {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClientBuilder.class);
private final okhttp3.OkHttpClient okHttpClient;
private List<Interceptor> networkInterceptors = new ArrayList<>();
private Duration readTimeout;
private Duration responseTimeout;
private Duration writeTimeout;
private Duration connectionTimeout;
private Duration callTimeout;
private ConnectionPool connectionPool;
private Dispatcher dispatcher;
private ProxyOptions proxyOptions;
private Configuration configuration;
private boolean followRedirects;
/**
* Creates OkHttpAsyncHttpClientBuilder.
*/
public OkHttpAsyncHttpClientBuilder() {
this.okHttpClient = null;
}
/**
* Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient.
*
* @param okHttpClient the httpclient
*/
public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) {
this.okHttpClient = Objects.requireNonNull(okHttpClient, "'okHttpClient' cannot be null.");
}
/**
* Add a network layer interceptor to Http request pipeline.
*
* @param networkInterceptor the interceptor to add
* @return the updated OkHttpAsyncHttpClientBuilder object
*/
public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) {
Objects.requireNonNull(networkInterceptor, "'networkInterceptor' cannot be null.");
this.networkInterceptors.add(networkInterceptor);
return this;
}
/**
* Add network layer interceptors to Http request pipeline.
* <p>
* This replaces all previously-set interceptors.
*
* @param networkInterceptors The interceptors to add.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) {
this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "'networkInterceptors' cannot be null.");
return this;
}
/**
* Sets the read timeout duration used when reading the server response.
* <p>
* The read timeout begins once the first response read is triggered after the server response is received. This
* timeout triggers periodically but won't fire its operation if another read operation has completed between when
* the timeout is triggered and completes.
* <p>
* If {@code readTimeout} is null or {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout period will be
* applied to response read. When applying the timeout the greatest of one millisecond and the value of {@code
* readTimeout} will be used.
*
* @param readTimeout Read timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) {
this.readTimeout = readTimeout;
return this;
}
/**
* Sets the response timeout duration used when waiting for a server to reply.
* <p>
* The response timeout begins once the request write completes and finishes once the first response read is
* triggered when the server response is received.
* <p>
* If {@code responseTimeout} is null either {@link Configuration
* 60-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied to the response. When applying the timeout the greatest of one millisecond and the value of {@code
* responseTimeout} will be used.
* <p>
* Given OkHttp doesn't have an equivalent timeout for just responses, this is handled manually.
*
* @param responseTimeout Response timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
/**
* Sets the writing timeout for a request to be sent.
* <p>
* The writing timeout does not apply to the entire request but to the request being sent over the wire. For example
* a request body which emits {@code 10} {@code 8KB} buffers will trigger {@code 10} write operations, the last
* write tracker will update when each operation completes and the outbound buffer will be periodically checked to
* determine if it is still draining.
* <p>
* If {@code writeTimeout} is null either {@link Configuration
* timeout will be used, if it is a {@link Duration} less than or equal to zero then no write timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code writeTimeout} will be
* used.
*
* @param writeTimeout Write operation timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder writeTimeout(Duration writeTimeout) {
this.writeTimeout = writeTimeout;
return this;
}
/**
* Sets the connection timeout for a request to be sent.
* <p>
* The connection timeout begins once the request attempts to connect to the remote host and finishes once the
* connection is resolved.
* <p>
* If {@code connectTimeout} is null either {@link Configuration
* 10-second timeout will be used, if it is a {@link Duration} less than or equal to zero then no timeout will be
* applied. When applying the timeout the greatest of one millisecond and the value of {@code connectTimeout} will
* be used.
* <p>
* By default, the connection timeout is 10 seconds.
*
* @param connectionTimeout Connect timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) {
this.connectionTimeout = connectionTimeout;
return this;
}
/**
* Sets the default timeout for complete calls.
* <p>
* The call timeout spans the entire call: resolving DNS, connecting, writing the request body,
* server processing, and reading the response body.
* <p>
* Null or {@link Duration
* must be between 1 and {@link Integer
* <p>
* By default, call timeout is not enabled.
*
* @param callTimeout Call timeout duration.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder callTimeout(Duration callTimeout) {
if (callTimeout != null && callTimeout.isNegative()) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'callTimeout' cannot be negative"));
}
this.callTimeout = callTimeout;
return this;
}
/**
* Sets the Http connection pool.
*
* @param connectionPool The OkHttp connection pool to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) {
this.connectionPool = Objects.requireNonNull(connectionPool, "'connectionPool' cannot be null.");
return this;
}
/**
* Sets the dispatcher that also composes the thread pool for executing HTTP requests.
*
* @param dispatcher The dispatcher to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
* @see OkHttpClient.Builder
*/
public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) {
this.dispatcher = Objects.requireNonNull(dispatcher, "'dispatcher' cannot be null.");
return this;
}
/**
* Sets the proxy.
*
* @param proxyOptions The proxy configuration to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* Sets the configuration store that is used during construction of the HTTP client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* <p>Sets the followRedirect flag on the underlying OkHttp-backed {@link com.azure.core.http.HttpClient}.</p>
*
* <p>If this is set to 'true' redirects will be followed automatically, and
* if your HTTP pipeline is configured with a redirect policy it will not be called.</p>
*
* @param followRedirects The followRedirects value to use.
* @return The updated OkHttpAsyncHttpClientBuilder object.
*/
public OkHttpAsyncHttpClientBuilder followRedirects(boolean followRedirects) {
this.followRedirects = followRedirects;
return this;
}
/**
* Creates a new OkHttp-backed {@link com.azure.core.http.HttpClient} instance on every call, using the
* configuration set in the builder at the time of the build method call.
*
* @return A new OkHttp-backed {@link com.azure.core.http.HttpClient} instance.
*/
public HttpClient build() {
OkHttpClient.Builder httpClientBuilder
= this.okHttpClient == null ? new OkHttpClient.Builder() : this.okHttpClient.newBuilder();
for (Interceptor interceptor : this.networkInterceptors) {
httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor);
}
httpClientBuilder = httpClientBuilder.connectTimeout(getTimeout(connectionTimeout, getDefaultConnectTimeout()))
.writeTimeout(getTimeout(writeTimeout, getDefaultWriteTimeout()))
.readTimeout(getTimeout(readTimeout, getDefaultReadTimeout()));
if (callTimeout != null) {
httpClientBuilder.callTimeout(callTimeout);
}
if (this.connectionPool != null) {
httpClientBuilder = httpClientBuilder.connectionPool(connectionPool);
}
if (this.dispatcher != null) {
httpClientBuilder = httpClientBuilder.dispatcher(dispatcher);
}
Configuration buildConfiguration
= (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
ProxyOptions buildProxyOptions
= (proxyOptions == null) ? ProxyOptions.fromConfiguration(buildConfiguration, true) : proxyOptions;
if (buildProxyOptions != null) {
httpClientBuilder
= httpClientBuilder.proxySelector(new OkHttpProxySelector(buildProxyOptions.getType().toProxyType(),
buildProxyOptions::getAddress, buildProxyOptions.getNonProxyHosts()));
if (buildProxyOptions.getUsername() != null) {
ProxyAuthenticator proxyAuthenticator
= new ProxyAuthenticator(buildProxyOptions.getUsername(), buildProxyOptions.getPassword());
httpClientBuilder = httpClientBuilder.proxyAuthenticator(proxyAuthenticator)
.addInterceptor(proxyAuthenticator.getProxyAuthenticationInfoInterceptor());
}
}
httpClientBuilder.followRedirects(this.followRedirects);
return new OkHttpAsyncHttpClient(httpClientBuilder.build(),
getTimeout(responseTimeout, getDefaultResponseTimeout()));
}
} | |
ResourceManagerUtils.toPrimitiveBoolean | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy()); | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
@Override
public List<String> dedicatedDataEndpointsHostNames() {
return this.innerModel().dataEndpointHostNames() == null
? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
    // Turn on dedicated data endpoints on whichever payload will be sent to the service.
    if (!isInCreateMode()) {
        updateParameters.withDataEndpointEnabled(true);
    } else {
        this.innerModel().withDataEndpointEnabled(true);
    }
    return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
    // Turn off dedicated data endpoints on whichever payload will be sent to the service.
    if (!isInCreateMode()) {
        updateParameters.withDataEndpointEnabled(false);
    } else {
        this.innerModel().withDataEndpointEnabled(false);
    }
    return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
    // Synchronous facade over the async listing; pages are fetched lazily on iteration.
    return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
    // Wrap each inner connection model in the read-only PrivateEndpointConnectionImpl view.
    return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
        .listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
    // Blocking wrapper over the async approval.
    approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    // Minimal createOrUpdate payload that flips the connection status to APPROVED.
    PrivateEndpointConnectionInner payload = new PrivateEndpointConnectionInner()
        .withPrivateLinkServiceConnectionState(
            new PrivateLinkServiceConnectionState().withStatus(ConnectionStatus.APPROVED));
    return this.manager().serviceClient().getPrivateEndpointConnections()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName, payload)
        .then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
    // Blocking wrapper over the async rejection.
    rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    // Minimal createOrUpdate payload that flips the connection status to REJECTED.
    PrivateEndpointConnectionInner payload = new PrivateEndpointConnectionInner()
        .withPrivateLinkServiceConnectionState(
            new PrivateLinkServiceConnectionState().withStatus(ConnectionStatus.REJECTED));
    return this.manager().serviceClient().getPrivateEndpointConnections()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName, payload)
        .then();
}
private void ensureNetworkRuleSet() {
    // Lazily initialize the network rule set on the payload that will be sent:
    // a fresh rule set with an empty IP-rule list at create time, or a reference
    // to the current inner model's rule set when updating.
    if (this.isInCreateMode()) {
        if (this.innerModel().networkRuleSet() == null) {
            NetworkRuleSet freshRuleSet = new NetworkRuleSet();
            freshRuleSet.withIpRules(new ArrayList<>());
            this.innerModel().withNetworkRuleSet(freshRuleSet);
        }
    } else if (updateParameters.networkRuleSet() == null) {
        updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
    }
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
    // Synchronous facade over the async listing; pages are fetched lazily on iteration.
    return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
    // Map each inner resource to the read-only PrivateLinkResourceImpl view.
    // Uses PagedConverter.mapPage for consistency with listPrivateEndpointConnectionsAsync
    // in this class (PagedFlux#mapPage is deprecated in azure-core).
    return PagedConverter.mapPage(
        this.manager().serviceClient().getRegistries()
            .listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name()),
        PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
    // Zone redundancy is only applied at create time; in update mode this call is a
    // silent no-op (presumably because zone redundancy cannot be changed once the
    // registry exists -- NOTE(review): confirm against the service API).
    if (isInCreateMode()) {
        this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
    }
    return this;
}
/** Read-only view over a {@link PrivateLinkResourceInner}. */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final PrivateLinkResourceInner innerModel;

    private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
        this.innerModel = innerModel;
    }

    @Override
    public String groupId() {
        return innerModel.groupId();
    }

    @Override
    public List<String> requiredMemberNames() {
        // Guard against a null list from the service: Collections.unmodifiableList(null)
        // would throw NullPointerException. Matches the null handling used by
        // dedicatedDataEndpointsHostNames() in the enclosing class.
        return innerModel.requiredMembers() == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(innerModel.requiredMembers());
    }

    @Override
    public List<String> requiredDnsZoneNames() {
        // Same null guard as requiredMemberNames().
        return innerModel.requiredZoneNames() == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
/**
 * Read-only view over a {@link PrivateEndpointConnectionInner}.
 *
 * <p>All derived fields are computed eagerly in the constructor; each one maps the
 * service's model types to the fluent-core equivalents, treating a missing (null)
 * inner value as a null view field.
 */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    private final PrivateEndpointConnectionInner innerModel;
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;

    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;
        // Null-safe mapping of the endpoint reference (only its resource id is carried over).
        this.privateEndpoint = innerModel.privateEndpoint() == null
            ? null
            : new PrivateEndpoint(innerModel.privateEndpoint().id());
        // Status is converted enum-to-enum via its string form; a null actionsRequired
        // is normalized to ActionsRequired.NONE.
        this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
            ? null
            : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                innerModel.privateLinkServiceConnectionState().status() == null
                    ? null
                    : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                        .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionsRequired() == null
                    ? ActionsRequired.NONE.toString()
                    : innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
        // Provisioning state is likewise converted through its string representation.
        this.provisioningState = innerModel.provisioningState() == null
            ? null
            : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
    }

    @Override
    public String id() {
        return innerModel.id();
    }

    @Override
    public String name() {
        return innerModel.name();
    }

    @Override
    public String type() {
        return innerModel.type();
    }

    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }

    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }

    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
    // Null-safe unboxing: the service may omit adminUserEnabled, and auto-unboxing a
    // null Boolean would throw NullPointerException. Mirrors the pattern already used
    // by isDedicatedDataEndpointsEnabled() in this class.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().adminUserEnabled());
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fix: the duplicate @Override annotation was removed -- @Override is not a
    // repeatable annotation, so declaring it twice is a compile error.
    // Returns an unmodifiable view of the host names, or an empty list when the
    // service reports none.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
The input parameter of the method `ResourceManagerUtils.toPrimitiveBoolean` must be of type `Boolean`, but the type of `ZoneRedundancy` is an enumeration. So I think `ResourceManagerUtils.toPrimitiveBoolean` is not applicable here.  | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy()); | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
    // Null-safe unboxing: the service may omit adminUserEnabled, and auto-unboxing a
    // null Boolean would throw NullPointerException. Mirrors the pattern already used
    // by isDedicatedDataEndpointsEnabled() in this class.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().adminUserEnabled());
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fix: the duplicate @Override annotation was removed -- @Override is not a
    // repeatable annotation, so declaring it twice is a compile error.
    // Returns an unmodifiable view of the host names, or an empty list when the
    // service reports none.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
/** Immutable adapter exposing a {@code PrivateLinkResourceInner} through the fluent interface. */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final PrivateLinkResourceInner innerModel;

    private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
        this.innerModel = innerModel;
    }

    @Override
    public String groupId() {
        return innerModel.groupId();
    }

    @Override
    public List<String> requiredMemberNames() {
        // Fix: guard against a null list in the service payload; previously
        // Collections.unmodifiableList(null) threw NPE. Mirrors the null handling
        // of dedicatedDataEndpointsHostNames() in the enclosing class.
        return innerModel.requiredMembers() == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(innerModel.requiredMembers());
    }

    @Override
    public List<String> requiredDnsZoneNames() {
        // Same null guard as requiredMemberNames().
        return innerModel.requiredZoneNames() == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
// Immutable adapter that eagerly converts a service-model PrivateEndpointConnectionInner
// into the fluent-model types at construction time; all getters return precomputed state.
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
// Null when the inner payload has no private endpoint attached.
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
// Maps each nullable inner field to its fluent counterpart, preserving null where the
// service omitted the field (except actionsRequired, which defaults to NONE below).
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
// Wrap only the endpoint's resource id; null-propagate if absent.
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
// Convert the service connection state: status is bridged via its string value
// (the two enums are distinct types with the same wire values).
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
// A missing actionsRequired is normalized to NONE rather than null.
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
// Provisioning state likewise bridged through its string form; null if absent.
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
// Fix: the annotation was duplicated ("@Override @Override"); @Override is not a
// repeatable annotation, so the duplicate is a compile error.
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // The service may return null when dedicated data endpoints are disabled;
    // expose an empty, unmodifiable list in that case.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Seems the logic can be simplified to just `ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy())`? | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy()); | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
// Fix: the annotation was duplicated ("@Override @Override"); @Override is not a
// repeatable annotation, so the duplicate is a compile error.
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // The service may return null when dedicated data endpoints are disabled;
    // expose an empty, unmodifiable list in that case.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
@Override
public List<String> dedicatedDataEndpointsHostNames() {
return this.innerModel().dataEndpointHostNames() == null
? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
The judgment of `isNull` is still needed.  | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy()); | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
@Override
public List<String> dedicatedDataEndpointsHostNames() {
return this.innerModel().dataEndpointHostNames() == null
? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fix: the annotation was duplicated ("@Override @Override"), which is a
    // compile error in Java — an annotation may appear at most once per target.
    // Null-safe: the service omits the host-name list when dedicated data
    // endpoints are disabled, so fall back to an immutable empty list.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
    // Asynchronously fetches the URL that build sources should be uploaded to,
    // wrapping the inner model in its fluent facade. A constructor reference
    // replaces the verbose identity lambda (same behavior, idiomatic form).
    return this
        .manager()
        .serviceClient()
        .getRegistries()
        .getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
        .map(SourceUploadDefinitionImpl::new);
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
/**
 * Lazily initializes the network rule set on whichever model the current
 * mode mutates: the inner model during create, the update parameters during
 * update (seeded from the inner model's current rule set).
 */
private void ensureNetworkRuleSet() {
    if (!this.isInCreateMode()) {
        // Update mode: carry the existing rule set over into the patch payload.
        if (updateParameters.networkRuleSet() == null) {
            updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
        }
        return;
    }
    if (this.innerModel().networkRuleSet() != null) {
        return;
    }
    NetworkRuleSet freshRuleSet = new NetworkRuleSet();
    this.innerModel().withNetworkRuleSet(freshRuleSet);
    freshRuleSet.withIpRules(new ArrayList<>());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
/**
 * Immutable read-only view over a {@link PrivateLinkResourceInner}, exposing
 * the group id plus the required member and DNS zone names.
 */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final PrivateLinkResourceInner innerModel;

    private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
        this.innerModel = Objects.requireNonNull(innerModel, "innerModel");
    }

    @Override
    public String groupId() {
        return innerModel.groupId();
    }

    @Override
    public List<String> requiredMemberNames() {
        // Guard against an absent list from the service: unmodifiableList(null)
        // would throw NPE. Mirrors the dedicatedDataEndpointsHostNames pattern.
        return innerModel.requiredMembers() == null
            ? Collections.emptyList() : Collections.unmodifiableList(innerModel.requiredMembers());
    }

    @Override
    public List<String> requiredDnsZoneNames() {
        return innerModel.requiredZoneNames() == null
            ? Collections.emptyList() : Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
When `zoneRedundancy` is null, it means `isZoneRedundancyEnabled` is false, right? Seems to align with `ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy())`'s result. Anyway, no harm in having the null check. | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy()); | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
    // The inner model's adminUserEnabled() returns a Boolean that may be null
    // (the service can omit it); unboxing it directly would throw NPE. Use the
    // same null-safe conversion as isDedicatedDataEndpointsEnabled().
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().adminUserEnabled());
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fix: the annotation was duplicated ("@Override @Override"), which is a
    // compile error in Java — an annotation may appear at most once per target.
    // Null-safe: the service omits the host-name list when dedicated data
    // endpoints are disabled, so fall back to an immutable empty list.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
    // The inner model's adminUserEnabled() returns a Boolean that may be null
    // (the service can omit it); unboxing it directly would throw NPE. Use the
    // same null-safe conversion as isDedicatedDataEndpointsEnabled().
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().adminUserEnabled());
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fix: the annotation was duplicated ("@Override @Override"), which is a
    // compile error in Java — an annotation may appear at most once per target.
    // Null-safe: the service omits the host-name list when dedicated data
    // endpoints are disabled, so fall back to an immutable empty list.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
    // A single IP address is handled as a degenerate CIDR range.
    return this.withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
    // Delegates to the range variant; a single address is a one-entry range.
    return this.withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
    // AZURE_SERVICES lets trusted Azure services bypass the network rules.
    final NetworkRuleBypassOptions bypass = NetworkRuleBypassOptions.AZURE_SERVICES;
    if (isInCreateMode()) {
        this.innerModel().withNetworkRuleBypassOptions(bypass);
    } else {
        updateParameters.withNetworkRuleBypassOptions(bypass);
    }
    return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
    // NONE disables the trusted-services bypass.
    final NetworkRuleBypassOptions bypass = NetworkRuleBypassOptions.NONE;
    if (isInCreateMode()) {
        this.innerModel().withNetworkRuleBypassOptions(bypass);
    } else {
        updateParameters.withNetworkRuleBypassOptions(bypass);
    }
    return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
    // Sets the data-endpoint flag on the create payload or the pending update.
    if (!isInCreateMode()) {
        updateParameters.withDataEndpointEnabled(true);
    } else {
        this.innerModel().withDataEndpointEnabled(true);
    }
    return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
    // Clears the data-endpoint flag on the create payload or the pending update.
    if (!isInCreateMode()) {
        updateParameters.withDataEndpointEnabled(false);
    } else {
        this.innerModel().withDataEndpointEnabled(false);
    }
    return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
    // Synchronous facade over the async listing.
    PagedFlux<PrivateEndpointConnection> flux = this.listPrivateEndpointConnectionsAsync();
    return new PagedIterable<>(flux);
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
    // Wrap each inner model in its read-only fluent view as pages arrive.
    return PagedConverter.mapPage(
        this.manager().serviceClient().getPrivateEndpointConnections()
            .listAsync(this.resourceGroupName(), this.name()),
        inner -> new PrivateEndpointConnectionImpl(inner));
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
    // Blocking facade over the async approval.
    this.approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    // Approval is an upsert of the connection carrying an APPROVED link-service state.
    PrivateEndpointConnectionInner connection = new PrivateEndpointConnectionInner()
        .withPrivateLinkServiceConnectionState(
            new PrivateLinkServiceConnectionState().withStatus(ConnectionStatus.APPROVED));
    return this.manager().serviceClient().getPrivateEndpointConnections()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName, connection)
        .then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
    // Blocking facade over the async rejection.
    this.rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    // Rejection is an upsert of the connection carrying a REJECTED link-service state.
    PrivateEndpointConnectionInner connection = new PrivateEndpointConnectionInner()
        .withPrivateLinkServiceConnectionState(
            new PrivateLinkServiceConnectionState().withStatus(ConnectionStatus.REJECTED));
    return this.manager().serviceClient().getPrivateEndpointConnections()
        .createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName, connection)
        .then();
}
/**
 * Guarantees that the rule-set object subsequent mutations write to exists.
 */
private void ensureNetworkRuleSet() {
    if (this.isInCreateMode()) {
        if (this.innerModel().networkRuleSet() == null) {
            this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
            this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
        }
    } else {
        // An existing registry may have no rule set at all; materialize one so that
        // update-mode callers (e.g. withAccessFromIpAddressRange) do not NPE when
        // they dereference innerModel().networkRuleSet().
        if (this.innerModel().networkRuleSet() == null) {
            this.innerModel().withNetworkRuleSet(new NetworkRuleSet().withIpRules(new ArrayList<>()));
        }
        if (updateParameters.networkRuleSet() == null) {
            updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
        }
    }
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
    // Synchronous facade over the async listing.
    PagedFlux<PrivateLinkResource> flux = this.listPrivateLinkResourcesAsync();
    return new PagedIterable<>(flux);
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
    // Project each inner resource into its read-only fluent wrapper.
    return this.manager().serviceClient().getRegistries()
        .listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
        .mapPage(inner -> new PrivateLinkResourceImpl(inner));
}
@Override
public RegistryImpl withZoneRedundancy() {
    // Zone redundancy is only applied at creation time; in update mode this call is
    // silently a no-op. NOTE(review): consider whether update mode should fail loudly
    // instead — confirm intent with the service's update semantics.
    if (isInCreateMode()) {
        this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
    }
    return this;
}
/** Read-only view over a {@code PrivateLinkResourceInner} service model. */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final PrivateLinkResourceInner innerModel;

    private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
        this.innerModel = innerModel;
    }

    @Override
    public String groupId() {
        return innerModel.groupId();
    }

    @Override
    public List<String> requiredMemberNames() {
        // The service payload may omit the list; return an empty list rather than
        // NPE-ing, matching the null-handling used by dedicatedDataEndpointsHostNames.
        return innerModel.requiredMembers() == null
            ? Collections.emptyList() : Collections.unmodifiableList(innerModel.requiredMembers());
    }

    @Override
    public List<String> requiredDnsZoneNames() {
        // Same defensive null handling as requiredMemberNames.
        return innerModel.requiredZoneNames() == null
            ? Collections.emptyList() : Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
/**
 * Read-only view over a {@code PrivateEndpointConnectionInner}, converting the
 * container-registry service model into the shared fluent-core types up front.
 */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    // Raw service model backing this view.
    private final PrivateEndpointConnectionInner innerModel;
    // Pre-converted projections of the inner payload; each is null when absent upstream.
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;

    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;
        this.privateEndpoint = innerModel.privateEndpoint() == null
            ? null
            : new PrivateEndpoint(innerModel.privateEndpoint().id());
        // Translate the registry-specific connection state into the fluent-core type:
        // status is converted via its string form, and a missing actionsRequired is
        // mapped to the explicit NONE constant.
        this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
            ? null
            : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                innerModel.privateLinkServiceConnectionState().status() == null
                    ? null
                    : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                        .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionsRequired() == null
                    ? ActionsRequired.NONE.toString()
                    : innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
        // Provisioning state is likewise round-tripped through its string representation.
        this.provisioningState = innerModel.provisioningState() == null
            ? null
            : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
    }

    @Override
    public String id() {
        return innerModel.id();
    }

    @Override
    public String name() {
        return innerModel.name();
    }

    @Override
    public String type() {
        return innerModel.type();
    }

    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }

    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }

    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} |
Fixed in the [pr#38592](https://github.com/Azure/azure-sdk-for-java/pull/38592) | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy()); | public boolean isZoneRedundancyEnabled() {
return !Objects.isNull(this.innerModel().zoneRedundancy()) && ZoneRedundancy.ENABLED.equals(this.innerModel().zoneRedundancy());
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fixed: the annotation was duplicated (`@Override` twice), which does not compile —
    // @Override is not a repeatable annotation.
    // Defensive: the service may return no host names; expose an unmodifiable view otherwise.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
return this.manager().serviceClient().getRegistries()
.listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name())
.mapPage(PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
if (isInCreateMode()) {
this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
}
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final PrivateLinkResourceInner innerModel;
private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionsRequired() == null
? ActionsRequired.NONE.toString()
: innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class RegistryImpl extends GroupableResourceImpl<Registry, RegistryInner, RegistryImpl, ContainerRegistryManager>
implements Registry, Registry.Definition, Registry.Update {
private RegistryUpdateParameters updateParameters;
private WebhooksImpl webhooks;
protected RegistryImpl(
String name, RegistryInner innerObject, ContainerRegistryManager manager) {
super(name, innerObject, manager);
this.webhooks = new WebhooksImpl(this, "Webhook");
}
@Override
protected Mono<RegistryInner> getInnerAsync() {
return this.manager().serviceClient().getRegistries()
.getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryImpl update() {
updateParameters = new RegistryUpdateParameters();
return super.update();
}
@Override
public Mono<Registry> createResourceAsync() {
final RegistryImpl self = this;
if (isInCreateMode()) {
return manager()
.serviceClient()
.getRegistries()
.createAsync(self.resourceGroupName(), self.name(), self.innerModel())
.map(innerToFluentMap(this));
} else {
updateParameters.withTags(innerModel().tags());
return manager()
.serviceClient()
.getRegistries()
.updateAsync(self.resourceGroupName(), self.name(), self.updateParameters)
.map(innerToFluentMap(this));
}
}
@Override
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
this.webhooks.clear();
return Mono.empty();
}
@Override
public Sku sku() {
return this.innerModel().sku();
}
@Override
public String loginServerUrl() {
return this.innerModel().loginServer();
}
@Override
public OffsetDateTime creationDate() {
return this.innerModel().creationDate();
}
@Override
public boolean adminUserEnabled() {
return this.innerModel().adminUserEnabled();
}
@Override
public RegistryImpl withBasicSku() {
return setManagedSku(new Sku().withName(SkuName.BASIC));
}
@Override
public RegistryImpl withStandardSku() {
return setManagedSku(new Sku().withName(SkuName.STANDARD));
}
@Override
public RegistryImpl withPremiumSku() {
return setManagedSku(new Sku().withName(SkuName.PREMIUM));
}
private RegistryImpl setManagedSku(Sku sku) {
if (this.isInCreateMode()) {
this.innerModel().withSku(sku);
} else {
this.updateParameters.withSku(sku);
}
return this;
}
@Override
public RegistryImpl withRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(true);
} else {
this.updateParameters.withAdminUserEnabled(true);
}
return this;
}
@Override
public RegistryImpl withoutRegistryNameAsAdminUser() {
if (this.isInCreateMode()) {
this.innerModel().withAdminUserEnabled(false);
} else {
this.updateParameters.withAdminUserEnabled(false);
}
return this;
}
@Override
public RegistryCredentials getCredentials() {
return this.manager().containerRegistries().getCredentials(this.resourceGroupName(), this.name());
}
@Override
public Mono<RegistryCredentials> getCredentialsAsync() {
return this.manager().containerRegistries().getCredentialsAsync(this.resourceGroupName(), this.name());
}
@Override
public RegistryCredentials regenerateCredential(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredential(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Mono<RegistryCredentials> regenerateCredentialAsync(AccessKeyType accessKeyType) {
return this
.manager()
.containerRegistries()
.regenerateCredentialAsync(this.resourceGroupName(), this.name(), accessKeyType);
}
@Override
public Collection<RegistryUsage> listQuotaUsages() {
return this.manager().containerRegistries().listQuotaUsages(this.resourceGroupName(), this.name());
}
@Override
public PagedFlux<RegistryUsage> listQuotaUsagesAsync() {
return this.manager().containerRegistries().listQuotaUsagesAsync(this.resourceGroupName(), this.name());
}
@Override
public WebhookOperations webhooks() {
return new WebhookOperationsImpl(this);
}
@Override
public PublicNetworkAccess publicNetworkAccess() {
return innerModel().publicNetworkAccess();
}
@Override
public boolean canAccessFromTrustedServices() {
return this.innerModel().networkRuleBypassOptions() == NetworkRuleBypassOptions.AZURE_SERVICES;
}
@Override
public NetworkRuleSet networkRuleSet() {
return this.innerModel().networkRuleSet();
}
@Override
public boolean isDedicatedDataEndpointsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().dataEndpointEnabled());
}
@Override
public List<String> dedicatedDataEndpointsHostNames() {
    // Fixed: the annotation was duplicated (`@Override` twice), which does not compile —
    // @Override is not a repeatable annotation.
    // Defensive: the service may return no host names; expose an unmodifiable view otherwise.
    return this.innerModel().dataEndpointHostNames() == null
        ? Collections.emptyList() : Collections.unmodifiableList(this.innerModel().dataEndpointHostNames());
}
@Override
public RegistryTaskRun.DefinitionStages.BlankFromRegistry scheduleRun() {
return new RegistryTaskRunImpl(this.manager(), new RunInner())
.withExistingRegistry(this.resourceGroupName(), this.name());
}
@Override
public SourceUploadDefinition getBuildSourceUploadUrl() {
return this.getBuildSourceUploadUrlAsync().block();
}
@Override
public Mono<SourceUploadDefinition> getBuildSourceUploadUrlAsync() {
return this
.manager()
.serviceClient()
.getRegistries()
.getBuildSourceUploadUrlAsync(this.resourceGroupName(), this.name())
.map(sourceUploadDefinitionInner -> new SourceUploadDefinitionImpl(sourceUploadDefinitionInner));
}
@Override
public RegistryImpl withoutWebhook(String name) {
webhooks.withoutWebhook(name);
return this;
}
@Override
public WebhookImpl updateWebhook(String name) {
return webhooks.updateWebhook(name);
}
@Override
public WebhookImpl defineWebhook(String name) {
return webhooks.defineWebhook(name);
}
@Override
public RegistryImpl enablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.ENABLED);
}
return this;
}
@Override
public RegistryImpl disablePublicNetworkAccess() {
if (this.isInCreateMode()) {
this.innerModel().withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
} else {
updateParameters.withPublicNetworkAccess(PublicNetworkAccess.DISABLED);
}
return this;
}
@Override
public RegistryImpl withAccessFromSelectedNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.DENY);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.DENY);
}
return this;
}
@Override
public RegistryImpl withAccessFromAllNetworks() {
ensureNetworkRuleSet();
if (isInCreateMode()) {
this.innerModel().networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
} else {
updateParameters.networkRuleSet().withDefaultAction(DefaultAction.ALLOW);
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddressRange(String ipAddressCidr) {
ensureNetworkRuleSet();
if (this.innerModel().networkRuleSet().ipRules()
.stream().noneMatch(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr))) {
this.innerModel().networkRuleSet().ipRules().add(new IpRule().withAction(Action.ALLOW).withIpAddressOrRange(ipAddressCidr));
}
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withoutAccessFromIpAddressRange(String ipAddressCidr) {
if (this.innerModel().networkRuleSet() == null) {
return this;
}
ensureNetworkRuleSet();
this.innerModel().networkRuleSet().ipRules().removeIf(ipRule -> Objects.equals(ipRule.ipAddressOrRange(), ipAddressCidr));
if (!isInCreateMode()) {
updateParameters.networkRuleSet().withIpRules(this.innerModel().networkRuleSet().ipRules());
}
return this;
}
@Override
public RegistryImpl withAccessFromIpAddress(String ipAddress) {
return withAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withoutAccessFromIpAddress(String ipAddress) {
return withoutAccessFromIpAddressRange(ipAddress);
}
@Override
public RegistryImpl withAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.AZURE_SERVICES);
}
return this;
}
@Override
public RegistryImpl withoutAccessFromTrustedServices() {
if (isInCreateMode()) {
this.innerModel().withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
} else {
updateParameters.withNetworkRuleBypassOptions(NetworkRuleBypassOptions.NONE);
}
return this;
}
@Override
public RegistryImpl enableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(true);
} else {
updateParameters.withDataEndpointEnabled(true);
}
return this;
}
@Override
public RegistryImpl disableDedicatedDataEndpoints() {
if (isInCreateMode()) {
this.innerModel().withDataEndpointEnabled(false);
} else {
updateParameters.withDataEndpointEnabled(false);
}
return this;
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(this.listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.createOrUpdateAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
ConnectionStatus.REJECTED)))
.then();
}
private void ensureNetworkRuleSet() {
if (this.isInCreateMode()) {
if (this.innerModel().networkRuleSet() == null) {
this.innerModel().withNetworkRuleSet(new NetworkRuleSet());
this.innerModel().networkRuleSet().withIpRules(new ArrayList<>());
}
} else {
if (updateParameters.networkRuleSet() == null) {
updateParameters.withNetworkRuleSet(this.innerModel().networkRuleSet());
}
}
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(this.listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
    // Use PagedConverter.mapPage for consistency with
    // listPrivateEndpointConnectionsAsync above; PagedFlux#mapPage is
    // deprecated in azure-core in favor of the PagedConverter utility.
    return PagedConverter.mapPage(
        this.manager().serviceClient().getRegistries()
            .listPrivateLinkResourcesAsync(this.resourceGroupName(), this.name()),
        PrivateLinkResourceImpl::new);
}
@Override
public RegistryImpl withZoneRedundancy() {
    // Zone redundancy is only applied at creation time; on update this is a no-op.
    if (!isInCreateMode()) {
        return this;
    }
    this.innerModel().withZoneRedundancy(ZoneRedundancy.ENABLED);
    return this;
}
/**
 * Read-only {@link PrivateLinkResource} view over a {@link PrivateLinkResourceInner}.
 * List getters are null-safe: a missing inner list yields an empty list rather
 * than the NPE {@code Collections.unmodifiableList(null)} would throw, matching
 * the defensive style of the sibling PrivateEndpointConnectionImpl.
 */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final PrivateLinkResourceInner innerModel;
    private PrivateLinkResourceImpl(PrivateLinkResourceInner innerModel) {
        this.innerModel = innerModel;
    }
    @Override
    public String groupId() {
        return innerModel.groupId();
    }
    @Override
    public List<String> requiredMemberNames() {
        // Guard against a null inner list before wrapping it.
        return innerModel.requiredMembers() == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(innerModel.requiredMembers());
    }
    @Override
    public List<String> requiredDnsZoneNames() {
        return innerModel.requiredZoneNames() == null
            ? Collections.emptyList()
            : Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
/**
 * Immutable, read-only {@link PrivateEndpointConnection} view over a
 * {@link PrivateEndpointConnectionInner}. All derived fields are converted
 * eagerly in the constructor; each conversion is null-tolerant because the
 * corresponding inner property may be absent in the service payload.
 */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    private final PrivateEndpointConnectionInner innerModel;
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;
    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;
        // Wrap the endpoint id only when the inner endpoint exists.
        this.privateEndpoint = innerModel.privateEndpoint() == null
            ? null
            : new PrivateEndpoint(innerModel.privateEndpoint().id());
        // Convert the registry-specific connection state into the fluent-core
        // model. Status is converted via its string form; a missing
        // actionsRequired defaults to ActionsRequired.NONE's string value.
        this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
            ? null
            : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                innerModel.privateLinkServiceConnectionState().status() == null
                    ? null
                    : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                        .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionsRequired() == null
                    ? ActionsRequired.NONE.toString()
                    : innerModel.privateLinkServiceConnectionState().actionsRequired().toString());
        // Re-parse the provisioning state through the expandable enum's factory.
        this.provisioningState = innerModel.provisioningState() == null
            ? null
            : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
    }
    @Override
    public String id() {
        return innerModel.id();
    }
    @Override
    public String name() {
        return innerModel.name();
    }
    @Override
    public String type() {
        return innerModel.type();
    }
    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }
    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }
    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} |
Enable [AssertingClient Testing](https://github.com/Azure/azure-sdk-for-java/pull/33177/files#diff-53439b928744c1bab1221881a598ddc21919338f3ad896811f32c0213a207a56R69-R83); this ensures sync API calls run on a completely synchronous call stack, and vice versa for the async stack. | public void createOrUpdateTest() {
BinaryData body = BinaryData.fromObject(getTestBodyFromDict());
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateTestWithResponse(newTestIdAsync, body, null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
} | Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateTestWithResponse(newTestIdAsync, body, null); | public void createOrUpdateTest() {
BinaryData body = BinaryData.fromObject(getTestBodyFromDict());
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.createOrUpdateTestWithResponse(newTestIdAsync, body, null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
} | class LoadTestAdministrationAsyncTests extends LoadTestingClientTestBase {
private Map<String, Object> getTestBodyFromDict() {
Map<String, Object> testMap = new HashMap<String, Object>();
testMap.put("displayName", "Java SDK Sample Test Async");
testMap.put("description", "Sample Test Async");
Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
loadTestConfigMap.put("engineInstances", 1);
testMap.put("loadTestConfiguration", loadTestConfigMap);
Map<String, Object> envVarMap = new HashMap<String, Object>();
envVarMap.put("threads_per_engine", 1);
envVarMap.put("ramp_up_time", 0);
envVarMap.put("duration_in_sec", 10);
envVarMap.put("domain", "azure.microsoft.com");
envVarMap.put("protocol", "https");
testMap.put("environmentVariables", envVarMap);
return testMap;
}
private BinaryData getFileBodyFromResource(String fileName) {
URL url = LoadTestAdministrationAsyncTests.class.getClassLoader().getResource(fileName);
return BinaryData.fromFile(new File(url.getPath()).toPath());
}
@Test
@Order(1)
@Test
@Order(2)
public void beginUploadTestFileAdditionalFiles() {
BinaryData file = getFileBodyFromResource(uploadCsvFileName);
RequestOptions requestOptions = new RequestOptions().addQueryParam("fileType", "ADDITIONAL_ARTIFACTS");
PollerFlux<BinaryData, BinaryData> poller = adminBuilder.buildAsyncClient().beginUploadTestFile(
newTestIdAsync,
uploadCsvFileName,
file,
requestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.last()).assertNext(pollResponse -> {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}).verifyComplete();
}
@Test
@Order(3)
public void beginUploadTestFileTestScript() {
BinaryData file = getFileBodyFromResource(uploadJmxFileName);
RequestOptions fileUploadRequestOptions = new RequestOptions().addQueryParam("fileType", "JMX_FILE");
PollerFlux<BinaryData, BinaryData> poller = adminBuilder.buildAsyncClient().beginUploadTestFile(newTestIdAsync, uploadJmxFileName, file, fileUploadRequestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.takeUntil(pollResponse -> pollResponse.getStatus().isComplete()).last().flatMap(AsyncPollResponse::getFinalResult)).assertNext(fileBinary -> {
try {
JsonNode fileNode = OBJECT_MAPPER.readTree(fileBinary.toString());
String validationStatus = fileNode.get("validationStatus").asText();
Assertions.assertTrue(fileNode.get("fileName").asText().equals(uploadJmxFileName) && "VALIDATION_SUCCESS".equals(validationStatus));
} catch (Exception e) {
Assertions.assertTrue(false);
}
}).verifyComplete();
}
@Test
@Order(4)
public void createOrUpdateAppComponents() {
BinaryData body = BinaryData.fromObject(getAppComponentBodyFromDict());
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateAppComponentsWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(5)
public void createOrUpdateServerMetricsConfig() {
BinaryData body = BinaryData.fromObject(getServerMetricsBodyFromDict());
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateServerMetricsConfigWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(6)
public void getTestFile() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getTestFileWithResponse(newTestIdAsync, uploadJmxFileName, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(file.get("fileName").asText().equals(uploadJmxFileName) && file.get("fileType").asText().equals("JMX_FILE"));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(7)
public void getTest() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getTestWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("testId").asText().equals(newTestIdAsync));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(8)
public void getAppComponents() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getAppComponentsWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("components").has(defaultAppComponentResourceId) && test.get("components").get(defaultAppComponentResourceId).get("resourceId").asText().equalsIgnoreCase(defaultAppComponentResourceId));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(9)
public void getServerMetricsConfig() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getServerMetricsConfigWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("metrics").has(defaultServerMetricId) && test.get("metrics").get(defaultServerMetricId).get("id").asText().equalsIgnoreCase(defaultServerMetricId));
} catch (Exception e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(10)
public void listTestFiles() {
PagedFlux<BinaryData> response = adminBuilder.buildAsyncClient().listTestFiles(newTestIdAsync, null);
StepVerifier.create(response).expectNextMatches(fileBinary -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(fileBinary.toString());
if (file.get("fileName").asText().equals(uploadJmxFileName) && file.get("fileType").asText().equals("JMX_FILE")) {
return true;
}
} catch (Exception e) {
}
return false;
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(11)
public void listTests() {
RequestOptions reqOpts = new RequestOptions()
.addQueryParam("orderBy", "lastModifiedDateTime desc");
PagedFlux<BinaryData> response = adminBuilder.buildAsyncClient().listTests(reqOpts);
StepVerifier.create(response).expectNextMatches(testBinary -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(testBinary.toString());
return test.get("testId").asText().equals(newTestIdAsync);
} catch (Exception e) {
return false;
}
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(12)
public void deleteTestFile() {
StepVerifier.create(adminBuilder.buildAsyncClient().deleteTestFileWithResponse(newTestIdAsync, uploadCsvFileName, null)).expectNextCount(1).verifyComplete();
StepVerifier.create(adminBuilder.buildAsyncClient().deleteTestFileWithResponse(newTestIdAsync, uploadJmxFileName, null)).expectNextCount(1).verifyComplete();
}
@Test
@Order(13)
public void deleteTest() {
StepVerifier.create(adminBuilder.buildAsyncClient().deleteTestWithResponse(newTestIdAsync, null)).expectNextCount(1).verifyComplete();
}
} | class LoadTestAdministrationAsyncTests extends LoadTestingClientTestBase {
private Map<String, Object> getTestBodyFromDict() {
Map<String, Object> testMap = new HashMap<String, Object>();
testMap.put("displayName", "Java SDK Sample Test Async");
testMap.put("description", "Sample Test Async");
Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
loadTestConfigMap.put("engineInstances", 1);
testMap.put("loadTestConfiguration", loadTestConfigMap);
Map<String, Object> envVarMap = new HashMap<String, Object>();
envVarMap.put("threads_per_engine", 1);
envVarMap.put("ramp_up_time", 0);
envVarMap.put("duration_in_sec", 10);
envVarMap.put("domain", "azure.microsoft.com");
envVarMap.put("protocol", "https");
testMap.put("environmentVariables", envVarMap);
return testMap;
}
private BinaryData getFileBodyFromResource(String fileName) {
URL url = LoadTestAdministrationAsyncTests.class.getClassLoader().getResource(fileName);
return BinaryData.fromFile(new File(url.getPath()).toPath());
}
@Test
@Order(1)
@Test
@Order(2)
public void beginUploadTestFileAdditionalFiles() {
BinaryData file = getFileBodyFromResource(uploadCsvFileName);
RequestOptions requestOptions = new RequestOptions().addQueryParam("fileType", "ADDITIONAL_ARTIFACTS");
PollerFlux<BinaryData, BinaryData> poller = getLoadTestAdministrationAsyncClient().beginUploadTestFile(
newTestIdAsync,
uploadCsvFileName,
file,
requestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.last()).assertNext(pollResponse -> {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}).verifyComplete();
}
@Test
@Order(3)
public void beginUploadTestFileTestScript() {
BinaryData file = getFileBodyFromResource(uploadJmxFileName);
RequestOptions fileUploadRequestOptions = new RequestOptions().addQueryParam("fileType", "JMX_FILE");
PollerFlux<BinaryData, BinaryData> poller = getLoadTestAdministrationAsyncClient()
.beginUploadTestFile(newTestIdAsync, uploadJmxFileName, file, fileUploadRequestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.takeUntil(pollResponse -> pollResponse.getStatus().isComplete()).last()
.flatMap(AsyncPollResponse::getFinalResult)).assertNext(fileBinary -> {
try {
JsonNode fileNode = OBJECT_MAPPER.readTree(fileBinary.toString());
String validationStatus = fileNode.get("validationStatus").asText();
Assertions.assertTrue(fileNode.get("fileName").asText().equals(uploadJmxFileName)
&& "VALIDATION_SUCCESS".equals(validationStatus));
} catch (Exception e) {
Assertions.assertTrue(false);
}
}).verifyComplete();
}
@Test
@Order(4)
public void createOrUpdateAppComponents() {
BinaryData body = BinaryData.fromObject(getAppComponentBodyFromDict());
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.createOrUpdateAppComponentsWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(5)
public void createOrUpdateServerMetricsConfig() {
BinaryData body = BinaryData.fromObject(getServerMetricsBodyFromDict());
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.createOrUpdateServerMetricsConfigWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(6)
public void getTestFile() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getTestFileWithResponse(newTestIdAsync, uploadJmxFileName, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(file.get("fileName").asText().equals(uploadJmxFileName)
&& file.get("fileType").asText().equals("JMX_FILE"));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(7)
public void getTest() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getTestWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("testId").asText().equals(newTestIdAsync));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(8)
public void getAppComponents() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getAppComponentsWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("components").has(defaultAppComponentResourceId)
&& test.get("components").get(defaultAppComponentResourceId).get("resourceId").asText()
.equalsIgnoreCase(defaultAppComponentResourceId));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(9)
public void getServerMetricsConfig() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getServerMetricsConfigWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("metrics").has(defaultServerMetricId) && test.get("metrics")
.get(defaultServerMetricId).get("id").asText().equalsIgnoreCase(defaultServerMetricId));
} catch (Exception e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(10)
public void listTestFiles() {
PagedFlux<BinaryData> response = getLoadTestAdministrationAsyncClient().listTestFiles(newTestIdAsync, null);
StepVerifier.create(response).expectNextMatches(fileBinary -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(fileBinary.toString());
if (file.get("fileName").asText().equals(uploadJmxFileName)
&& file.get("fileType").asText().equals("JMX_FILE")) {
return true;
}
} catch (Exception e) {
}
return false;
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(11)
public void listTests() {
RequestOptions reqOpts = new RequestOptions()
.addQueryParam("orderBy", "lastModifiedDateTime desc");
PagedFlux<BinaryData> response = getLoadTestAdministrationAsyncClient().listTests(reqOpts);
StepVerifier.create(response).expectNextMatches(testBinary -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(testBinary.toString());
return test.get("testId").asText().equals(newTestIdAsync);
} catch (Exception e) {
return false;
}
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(12)
public void deleteTestFile() {
StepVerifier.create(getLoadTestAdministrationAsyncClient().deleteTestFileWithResponse(newTestIdAsync,
uploadCsvFileName, null)).expectNextCount(1).verifyComplete();
StepVerifier.create(getLoadTestAdministrationAsyncClient().deleteTestFileWithResponse(newTestIdAsync,
uploadJmxFileName, null)).expectNextCount(1).verifyComplete();
}
@Test
@Order(13)
public void deleteTest() {
StepVerifier.create(getLoadTestAdministrationAsyncClient().deleteTestWithResponse(newTestIdAsync, null))
.expectNextCount(1).verifyComplete();
}
} |
Added AssertingClient | public void createOrUpdateTest() {
BinaryData body = BinaryData.fromObject(getTestBodyFromDict());
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateTestWithResponse(newTestIdAsync, body, null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
} | Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateTestWithResponse(newTestIdAsync, body, null); | public void createOrUpdateTest() {
BinaryData body = BinaryData.fromObject(getTestBodyFromDict());
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.createOrUpdateTestWithResponse(newTestIdAsync, body, null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
} | class LoadTestAdministrationAsyncTests extends LoadTestingClientTestBase {
private Map<String, Object> getTestBodyFromDict() {
Map<String, Object> testMap = new HashMap<String, Object>();
testMap.put("displayName", "Java SDK Sample Test Async");
testMap.put("description", "Sample Test Async");
Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
loadTestConfigMap.put("engineInstances", 1);
testMap.put("loadTestConfiguration", loadTestConfigMap);
Map<String, Object> envVarMap = new HashMap<String, Object>();
envVarMap.put("threads_per_engine", 1);
envVarMap.put("ramp_up_time", 0);
envVarMap.put("duration_in_sec", 10);
envVarMap.put("domain", "azure.microsoft.com");
envVarMap.put("protocol", "https");
testMap.put("environmentVariables", envVarMap);
return testMap;
}
private BinaryData getFileBodyFromResource(String fileName) {
URL url = LoadTestAdministrationAsyncTests.class.getClassLoader().getResource(fileName);
return BinaryData.fromFile(new File(url.getPath()).toPath());
}
@Test
@Order(1)
@Test
@Order(2)
public void beginUploadTestFileAdditionalFiles() {
BinaryData file = getFileBodyFromResource(uploadCsvFileName);
RequestOptions requestOptions = new RequestOptions().addQueryParam("fileType", "ADDITIONAL_ARTIFACTS");
PollerFlux<BinaryData, BinaryData> poller = adminBuilder.buildAsyncClient().beginUploadTestFile(
newTestIdAsync,
uploadCsvFileName,
file,
requestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.last()).assertNext(pollResponse -> {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}).verifyComplete();
}
@Test
@Order(3)
public void beginUploadTestFileTestScript() {
BinaryData file = getFileBodyFromResource(uploadJmxFileName);
RequestOptions fileUploadRequestOptions = new RequestOptions().addQueryParam("fileType", "JMX_FILE");
PollerFlux<BinaryData, BinaryData> poller = adminBuilder.buildAsyncClient().beginUploadTestFile(newTestIdAsync, uploadJmxFileName, file, fileUploadRequestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.takeUntil(pollResponse -> pollResponse.getStatus().isComplete()).last().flatMap(AsyncPollResponse::getFinalResult)).assertNext(fileBinary -> {
try {
JsonNode fileNode = OBJECT_MAPPER.readTree(fileBinary.toString());
String validationStatus = fileNode.get("validationStatus").asText();
Assertions.assertTrue(fileNode.get("fileName").asText().equals(uploadJmxFileName) && "VALIDATION_SUCCESS".equals(validationStatus));
} catch (Exception e) {
Assertions.assertTrue(false);
}
}).verifyComplete();
}
@Test
@Order(4)
public void createOrUpdateAppComponents() {
BinaryData body = BinaryData.fromObject(getAppComponentBodyFromDict());
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateAppComponentsWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(5)
public void createOrUpdateServerMetricsConfig() {
BinaryData body = BinaryData.fromObject(getServerMetricsBodyFromDict());
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().createOrUpdateServerMetricsConfigWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(6)
public void getTestFile() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getTestFileWithResponse(newTestIdAsync, uploadJmxFileName, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(file.get("fileName").asText().equals(uploadJmxFileName) && file.get("fileType").asText().equals("JMX_FILE"));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(7)
public void getTest() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getTestWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("testId").asText().equals(newTestIdAsync));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(8)
public void getAppComponents() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getAppComponentsWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("components").has(defaultAppComponentResourceId) && test.get("components").get(defaultAppComponentResourceId).get("resourceId").asText().equalsIgnoreCase(defaultAppComponentResourceId));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(9)
public void getServerMetricsConfig() {
Mono<Response<BinaryData>> monoResponse = adminBuilder.buildAsyncClient().getServerMetricsConfigWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("metrics").has(defaultServerMetricId) && test.get("metrics").get(defaultServerMetricId).get("id").asText().equalsIgnoreCase(defaultServerMetricId));
} catch (Exception e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(10)
public void listTestFiles() {
PagedFlux<BinaryData> response = adminBuilder.buildAsyncClient().listTestFiles(newTestIdAsync, null);
StepVerifier.create(response).expectNextMatches(fileBinary -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(fileBinary.toString());
if (file.get("fileName").asText().equals(uploadJmxFileName) && file.get("fileType").asText().equals("JMX_FILE")) {
return true;
}
} catch (Exception e) {
}
return false;
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(11)
public void listTests() {
RequestOptions reqOpts = new RequestOptions()
.addQueryParam("orderBy", "lastModifiedDateTime desc");
PagedFlux<BinaryData> response = adminBuilder.buildAsyncClient().listTests(reqOpts);
StepVerifier.create(response).expectNextMatches(testBinary -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(testBinary.toString());
return test.get("testId").asText().equals(newTestIdAsync);
} catch (Exception e) {
return false;
}
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(12)
public void deleteTestFile() {
StepVerifier.create(adminBuilder.buildAsyncClient().deleteTestFileWithResponse(newTestIdAsync, uploadCsvFileName, null)).expectNextCount(1).verifyComplete();
StepVerifier.create(adminBuilder.buildAsyncClient().deleteTestFileWithResponse(newTestIdAsync, uploadJmxFileName, null)).expectNextCount(1).verifyComplete();
}
@Test
@Order(13)
public void deleteTest() {
StepVerifier.create(adminBuilder.buildAsyncClient().deleteTestWithResponse(newTestIdAsync, null)).expectNextCount(1).verifyComplete();
}
} | class LoadTestAdministrationAsyncTests extends LoadTestingClientTestBase {
private Map<String, Object> getTestBodyFromDict() {
Map<String, Object> testMap = new HashMap<String, Object>();
testMap.put("displayName", "Java SDK Sample Test Async");
testMap.put("description", "Sample Test Async");
Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
loadTestConfigMap.put("engineInstances", 1);
testMap.put("loadTestConfiguration", loadTestConfigMap);
Map<String, Object> envVarMap = new HashMap<String, Object>();
envVarMap.put("threads_per_engine", 1);
envVarMap.put("ramp_up_time", 0);
envVarMap.put("duration_in_sec", 10);
envVarMap.put("domain", "azure.microsoft.com");
envVarMap.put("protocol", "https");
testMap.put("environmentVariables", envVarMap);
return testMap;
}
private BinaryData getFileBodyFromResource(String fileName) {
URL url = LoadTestAdministrationAsyncTests.class.getClassLoader().getResource(fileName);
return BinaryData.fromFile(new File(url.getPath()).toPath());
}
@Test
@Order(1)
@Test
@Order(2)
public void beginUploadTestFileAdditionalFiles() {
BinaryData file = getFileBodyFromResource(uploadCsvFileName);
RequestOptions requestOptions = new RequestOptions().addQueryParam("fileType", "ADDITIONAL_ARTIFACTS");
PollerFlux<BinaryData, BinaryData> poller = getLoadTestAdministrationAsyncClient().beginUploadTestFile(
newTestIdAsync,
uploadCsvFileName,
file,
requestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.last()).assertNext(pollResponse -> {
Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}).verifyComplete();
}
@Test
@Order(3)
public void beginUploadTestFileTestScript() {
BinaryData file = getFileBodyFromResource(uploadJmxFileName);
RequestOptions fileUploadRequestOptions = new RequestOptions().addQueryParam("fileType", "JMX_FILE");
PollerFlux<BinaryData, BinaryData> poller = getLoadTestAdministrationAsyncClient()
.beginUploadTestFile(newTestIdAsync, uploadJmxFileName, file, fileUploadRequestOptions);
poller = setPlaybackPollerFluxPollInterval(poller);
StepVerifier.create(poller.takeUntil(pollResponse -> pollResponse.getStatus().isComplete()).last()
.flatMap(AsyncPollResponse::getFinalResult)).assertNext(fileBinary -> {
try {
JsonNode fileNode = OBJECT_MAPPER.readTree(fileBinary.toString());
String validationStatus = fileNode.get("validationStatus").asText();
Assertions.assertTrue(fileNode.get("fileName").asText().equals(uploadJmxFileName)
&& "VALIDATION_SUCCESS".equals(validationStatus));
} catch (Exception e) {
Assertions.assertTrue(false);
}
}).verifyComplete();
}
@Test
@Order(4)
public void createOrUpdateAppComponents() {
BinaryData body = BinaryData.fromObject(getAppComponentBodyFromDict());
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.createOrUpdateAppComponentsWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(5)
public void createOrUpdateServerMetricsConfig() {
BinaryData body = BinaryData.fromObject(getServerMetricsBodyFromDict());
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.createOrUpdateServerMetricsConfigWithResponse(
newTestIdAsync,
body,
null);
StepVerifier.create(monoResponse).assertNext(response -> {
Assertions.assertTrue(Arrays.asList(200, 201).contains(response.getStatusCode()));
}).verifyComplete();
}
@Test
@Order(6)
public void getTestFile() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getTestFileWithResponse(newTestIdAsync, uploadJmxFileName, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(file.get("fileName").asText().equals(uploadJmxFileName)
&& file.get("fileType").asText().equals("JMX_FILE"));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(7)
public void getTest() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getTestWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("testId").asText().equals(newTestIdAsync));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(8)
public void getAppComponents() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getAppComponentsWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("components").has(defaultAppComponentResourceId)
&& test.get("components").get(defaultAppComponentResourceId).get("resourceId").asText()
.equalsIgnoreCase(defaultAppComponentResourceId));
} catch (Exception e) {
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(9)
public void getServerMetricsConfig() {
Mono<Response<BinaryData>> monoResponse = getLoadTestAdministrationAsyncClient()
.getServerMetricsConfigWithResponse(newTestIdAsync, null);
StepVerifier.create(monoResponse).assertNext(response -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(response.getValue().toString());
Assertions.assertTrue(test.get("metrics").has(defaultServerMetricId) && test.get("metrics")
.get(defaultServerMetricId).get("id").asText().equalsIgnoreCase(defaultServerMetricId));
} catch (Exception e) {
e.printStackTrace();
Assertions.assertTrue(false);
}
Assertions.assertEquals(200, response.getStatusCode());
}).verifyComplete();
}
@Test
@Order(10)
public void listTestFiles() {
PagedFlux<BinaryData> response = getLoadTestAdministrationAsyncClient().listTestFiles(newTestIdAsync, null);
StepVerifier.create(response).expectNextMatches(fileBinary -> {
try {
JsonNode file = OBJECT_MAPPER.readTree(fileBinary.toString());
if (file.get("fileName").asText().equals(uploadJmxFileName)
&& file.get("fileType").asText().equals("JMX_FILE")) {
return true;
}
} catch (Exception e) {
}
return false;
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(11)
public void listTests() {
RequestOptions reqOpts = new RequestOptions()
.addQueryParam("orderBy", "lastModifiedDateTime desc");
PagedFlux<BinaryData> response = getLoadTestAdministrationAsyncClient().listTests(reqOpts);
StepVerifier.create(response).expectNextMatches(testBinary -> {
try {
JsonNode test = OBJECT_MAPPER.readTree(testBinary.toString());
return test.get("testId").asText().equals(newTestIdAsync);
} catch (Exception e) {
return false;
}
}).thenConsumeWhile(fileBinary -> true).verifyComplete();
}
@Test
@Order(12)
public void deleteTestFile() {
StepVerifier.create(getLoadTestAdministrationAsyncClient().deleteTestFileWithResponse(newTestIdAsync,
uploadCsvFileName, null)).expectNextCount(1).verifyComplete();
StepVerifier.create(getLoadTestAdministrationAsyncClient().deleteTestFileWithResponse(newTestIdAsync,
uploadJmxFileName, null)).expectNextCount(1).verifyComplete();
}
@Test
@Order(13)
public void deleteTest() {
StepVerifier.create(getLoadTestAdministrationAsyncClient().deleteTestWithResponse(newTestIdAsync, null))
.expectNextCount(1).verifyComplete();
}
} |
can be removed. | private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
} | .skipRequest((ignored1, ignored2) -> false) | private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(new MockTokenCredential());
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} |
can be removed. | private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
} | .skipRequest((ignored1, ignored2) -> false) | private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(new MockTokenCredential());
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
/**
 * Creates a {@link LoadTestRunClientBuilder} wired for the current test mode.
 *
 * Fix (consistency): PLAYBACK previously used a hand-rolled
 * {@code AccessToken} lambda while the sibling administration builder uses
 * {@code MockTokenCredential}; both builders now authenticate identically in
 * playback (a mock credential that never hits the network).
 *
 * @param async whether the underlying HTTP client should assert async-only usage
 * @return the configured builder (caller invokes buildClient()/buildAsyncClient())
 */
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
    HttpClient httpClient = getTestModeHttpClient();
    // Asserting wrapper catches sync-vs-async misuse in tests.
    if (async) {
        httpClient = buildAsyncAssertingClient(httpClient);
    } else {
        httpClient = buildSyncAssertingClient(httpClient);
    }
    LoadTestRunClientBuilder builder = new LoadTestRunClientBuilder()
        .endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
        .httpClient(httpClient)
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
    if (getTestMode() == TestMode.PLAYBACK) {
        builder.credential(new MockTokenCredential());
    } else {
        // RECORD and LIVE both authenticate for real; RECORD additionally captures traffic.
        if (getTestMode() == TestMode.RECORD) {
            builder.addPolicy(interceptorManager.getRecordPolicy());
        }
        builder.credential(getTokenCredential());
    }
    return builder;
}
} |
```suggestion .credential(new MockTokenCredential()); ``` | private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
} | .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); | private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(new MockTokenCredential());
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} |
Done | private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
} | .skipRequest((ignored1, ignored2) -> false) | private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
// Per-test setup: outside LIVE mode, registers test-proxy sanitizers that
// redact host names from recordings; in PLAYBACK, relaxes request matching
// to ignore bodies so recordings replay deterministically.
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
// NOTE(review): the two `"https:` lines below are truncated string literals
// in this snapshot (likely the sanitizer's find/replace URLs) — confirm
// against the original source file before relying on this block.
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
// Redact any host appearing in response bodies via URL_REGEX.
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
// Match recorded requests without comparing bodies.
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
// Sync administration client built from the mode-appropriate builder.
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
// Async administration client built from the mode-appropriate builder.
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
// Sync test-run client built from the mode-appropriate builder.
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
// Async test-run client built from the mode-appropriate builder.
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
// Builds the app-component association request body:
// { "components": { <resourceId>: { resourceId, resourceType, resourceName,
//   displayName, kind } } }, keyed by defaultAppComponentResourceId.
protected Map<String, Object> getAppComponentBodyFromDict() {
    Map<String, Object> component = new HashMap<>();
    component.put("resourceId", defaultAppComponentResourceId);
    component.put("resourceType", "microsoft.insights/components");
    component.put("resourceName", "appcomponentresource");
    component.put("displayName", "Performance_LoadTest_Insights");
    component.put("kind", "web");
    Map<String, Object> components = new HashMap<>();
    components.put(defaultAppComponentResourceId, component);
    Map<String, Object> body = new HashMap<>();
    body.put("components", components);
    return body;
}
// Builds the server-metrics configuration request body:
// { "metrics": { <metricId>: { resourceId, metricNamespace, name,
//   aggregation, resourceType } } }, keyed by defaultServerMetricId.
protected Map<String, Object> getServerMetricsBodyFromDict() {
    Map<String, Object> metric = new HashMap<>();
    metric.put("resourceId", defaultAppComponentResourceId);
    metric.put("metricNamespace", "microsoft.insights/components");
    metric.put("name", "requests/duration");
    metric.put("aggregation", "Average");
    metric.put("resourceType", "microsoft.insights/components");
    Map<String, Object> metrics = new HashMap<>();
    metrics.put(defaultServerMetricId, metric);
    Map<String, Object> body = new HashMap<>();
    body.put("metrics", metrics);
    return body;
}
// Real credential for RECORD/LIVE runs; honors an optional AUTHORITY_HOST
// override from the global configuration.
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
// Only override when explicitly configured; builder default applies otherwise.
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
// Wraps the transport so the test fails on synchronous invocation.
// NOTE(review): earlier snapshots of this method also chained
// .skipRequest((ignored1, ignored2) -> false); confirm its removal is intended.
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
// Transport selection: recorded playback client in PLAYBACK, real default
// HttpClient otherwise (RECORD/LIVE).
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
// Builder for the administration client, wired per test mode:
// PLAYBACK -> mock credential; RECORD -> record policy + real credential;
// LIVE -> real credential only.
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
// Asserting wrapper catches sync-vs-async misuse in tests.
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(new MockTokenCredential());
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
// Builder for the test-run client, wired per test mode.
// NOTE(review): PLAYBACK here still uses a hand-rolled AccessToken lambda while
// the administration builder uses MockTokenCredential — consider aligning them.
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
// Asserting wrapper catches sync-vs-async misuse in tests.
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} |
Done | private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
} | .skipRequest((ignored1, ignored2) -> false) | private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(new MockTokenCredential());
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} |
Done | private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
} | .credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); | private LoadTestAdministrationClientBuilder getLoadTestAdministrationClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestAdministrationClientBuilder loadTestAdministrationClientBuilder = new LoadTestAdministrationClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestAdministrationClientBuilder
.credential(new MockTokenCredential());
} else if (getTestMode() == TestMode.RECORD) {
loadTestAdministrationClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestAdministrationClientBuilder.credential(getTokenCredential());
}
return loadTestAdministrationClientBuilder;
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} | class LoadTestingClientTestBase extends TestProxyTestBase {
private static final String URL_REGEX = "(?<=http:\\/\\/|https:\\/\\/)([^\\/?]+)";
private final String defaultEndpoint = "REDACTED.eastus.cnt-prod.loadtesting.azure.com";
protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
protected final String existingTestId = Configuration.getGlobalConfiguration().get("EXISTING_TEST_ID",
"11111111-1234-1234-1234-123456789012");
protected final String newTestId = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22222222-1234-1234-1234-123456789012");
protected final String newTestIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_ID",
"22223333-1234-1234-1234-123456789012");
protected final String newTestRunId = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID",
"33333333-1234-1234-1234-123456789012");
protected final String newTestRunIdAsync = Configuration.getGlobalConfiguration().get("NEW_TEST_RUN_ID_2",
"44444444-1234-1234-1234-123456789012");
protected final String uploadJmxFileName = Configuration.getGlobalConfiguration().get("UPLOAD_JMX_FILE_NAME",
"sample-JMX-file.jmx");
protected final String uploadCsvFileName = Configuration.getGlobalConfiguration().get("UPLOAD_CSV_FILE_NAME",
"additional-data.csv");
protected final String defaultAppComponentResourceId = Configuration.getGlobalConfiguration().get(
"APP_COMPONENT_RESOURCE_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource");
protected final String defaultServerMetricId = Configuration.getGlobalConfiguration().get("SERVER_METRIC_ID",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration");
@Override
protected void beforeTest() {
if (getTestMode() != TestMode.LIVE) {
List<TestProxySanitizer> sanitizers = new ArrayList<>();
sanitizers.add(new TestProxySanitizer("Location",
"https:
"https:
sanitizers.add(new TestProxySanitizer(URL_REGEX, "REDACTED", TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(sanitizers);
}
if (getTestMode() == TestMode.PLAYBACK) {
List<TestProxyRequestMatcher> matchers = new ArrayList<>();
matchers.add(new TestProxyRequestMatcher(TestProxyRequestMatcherType.BODILESS));
interceptorManager.addMatchers(matchers);
}
}
protected LoadTestAdministrationClient getLoadTestAdministrationClient() {
return getLoadTestAdministrationClientBuilder(false).buildClient();
}
protected LoadTestAdministrationAsyncClient getLoadTestAdministrationAsyncClient() {
return getLoadTestAdministrationClientBuilder(true).buildAsyncClient();
}
protected LoadTestRunClient getLoadTestRunClient() {
return getLoadTestRunClientBuilder(false).buildClient();
}
protected LoadTestRunAsyncClient getLoadTestRunAsyncClient() {
return getLoadTestRunClientBuilder(true).buildAsyncClient();
}
protected Map<String, Object> getAppComponentBodyFromDict() {
Map<String, Object> appCompMap = new HashMap<String, Object>();
Map<String, Object> compsMap = new HashMap<String, Object>();
Map<String, Object> compMap = new HashMap<String, Object>();
compMap.put("resourceId", defaultAppComponentResourceId);
compMap.put("resourceType", "microsoft.insights/components");
compMap.put("resourceName", "appcomponentresource");
compMap.put("displayName", "Performance_LoadTest_Insights");
compMap.put("kind", "web");
compsMap.put(defaultAppComponentResourceId, compMap);
appCompMap.put("components", compsMap);
return appCompMap;
}
protected Map<String, Object> getServerMetricsBodyFromDict() {
Map<String, Object> serverMetricsMap = new HashMap<String, Object>();
Map<String, Object> metricsMap = new HashMap<String, Object>();
Map<String, Object> metricMap = new HashMap<String, Object>();
metricMap.put("resourceId", defaultAppComponentResourceId);
metricMap.put("metricNamespace", "microsoft.insights/components");
metricMap.put("name", "requests/duration");
metricMap.put("aggregation", "Average");
metricMap.put("resourceType", "microsoft.insights/components");
metricsMap.put(defaultServerMetricId, metricMap);
serverMetricsMap.put("metrics", metricsMap);
return serverMetricsMap;
}
private TokenCredential getTokenCredential() {
DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder();
String authorityHost = Configuration.getGlobalConfiguration().get("AUTHORITY_HOST");
if (authorityHost != null) {
credentialBuilder.authorityHost(authorityHost);
}
return credentialBuilder.build();
}
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private HttpClient getTestModeHttpClient() {
HttpClient httpClient;
if (getTestMode() == TestMode.PLAYBACK) {
httpClient = interceptorManager.getPlaybackClient();
} else {
httpClient = HttpClient.createDefault();
}
return httpClient;
}
private LoadTestRunClientBuilder getLoadTestRunClientBuilder(boolean async) {
HttpClient httpClient = getTestModeHttpClient();
if (async) {
httpClient = buildAsyncAssertingClient(httpClient);
} else {
httpClient = buildSyncAssertingClient(httpClient);
}
LoadTestRunClientBuilder loadTestRunClientBuilder = new LoadTestRunClientBuilder()
.endpoint(Configuration.getGlobalConfiguration().get("ENDPOINT", defaultEndpoint))
.httpClient(httpClient)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC));
if (getTestMode() == TestMode.PLAYBACK) {
loadTestRunClientBuilder
.credential(request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
} else if (getTestMode() == TestMode.RECORD) {
loadTestRunClientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getTokenCredential());
} else if (getTestMode() == TestMode.LIVE) {
loadTestRunClientBuilder.credential(getTokenCredential());
}
return loadTestRunClientBuilder;
}
} |
Are these just formatting changes here ? | private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
} | header.getValuesList() | private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
} | class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
} | class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
} |
Yeah | private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
} | header.getValuesList() | private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
} | class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
} | class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
} |
Do we have lower tier? Free or Basic? (there is a cost on these resource) Also, use lower TB below (1 or 2? 24 is larger than all my SSD + HD combined) | public void testCreateElasticSan() {
ElasticSan elasticSan = null;
try {
String elasticSanName = "elasticsan" + randomPadding();
elasticSan = elasticSanManager.elasticSans()
.define(elasticSanName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new Sku().withName(SkuName.PREMIUM_LRS).withTier(SkuTier.PREMIUM))
.withBaseSizeTiB(15L)
.withExtendedCapacitySizeTiB(24L)
.create();
elasticSan.refresh();
Assertions.assertEquals(elasticSan.name(), elasticSanName);
Assertions.assertEquals(elasticSan.name(), elasticSanManager.elasticSans().getById(elasticSan.id()).name());
Assertions.assertTrue(elasticSanManager.elasticSans().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (elasticSan != null) {
elasticSanManager.elasticSans().deleteById(elasticSan.id());
}
}
} | .withSku(new Sku().withName(SkuName.PREMIUM_LRS).withTier(SkuTier.PREMIUM)) | public void testCreateElasticSan() {
ElasticSan elasticSan = null;
try {
String elasticSanName = "elasticsan" + randomPadding();
elasticSan = elasticSanManager.elasticSans()
.define(elasticSanName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new Sku().withName(SkuName.PREMIUM_LRS).withTier(SkuTier.PREMIUM))
.withBaseSizeTiB(1L)
.withExtendedCapacitySizeTiB(1L)
.create();
elasticSan.refresh();
Assertions.assertEquals(elasticSan.name(), elasticSanName);
Assertions.assertEquals(elasticSan.name(), elasticSanManager.elasticSans().getById(elasticSan.id()).name());
Assertions.assertTrue(elasticSanManager.elasticSans().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (elasticSan != null) {
elasticSanManager.elasticSans().deleteById(elasticSan.id());
}
}
} | class ElasticSanManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ElasticSanManager elasticSanManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
elasticSanManager = ElasticSanManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ElasticSanManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ElasticSanManager elasticSanManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
elasticSanManager = ElasticSanManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
1. The `ElasticSan` only supports `Premium_LR` and `Premium_ZRS`, `Free` and `Basic` cannot be used. 2. `Also, use lower TB below (1 or 2? 24 is larger than all my SSD + HD combined)` has been fixed in the new version. | public void testCreateElasticSan() {
ElasticSan elasticSan = null;
try {
String elasticSanName = "elasticsan" + randomPadding();
elasticSan = elasticSanManager.elasticSans()
.define(elasticSanName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new Sku().withName(SkuName.PREMIUM_LRS).withTier(SkuTier.PREMIUM))
.withBaseSizeTiB(15L)
.withExtendedCapacitySizeTiB(24L)
.create();
elasticSan.refresh();
Assertions.assertEquals(elasticSan.name(), elasticSanName);
Assertions.assertEquals(elasticSan.name(), elasticSanManager.elasticSans().getById(elasticSan.id()).name());
Assertions.assertTrue(elasticSanManager.elasticSans().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (elasticSan != null) {
elasticSanManager.elasticSans().deleteById(elasticSan.id());
}
}
} | .withSku(new Sku().withName(SkuName.PREMIUM_LRS).withTier(SkuTier.PREMIUM)) | public void testCreateElasticSan() {
ElasticSan elasticSan = null;
try {
String elasticSanName = "elasticsan" + randomPadding();
elasticSan = elasticSanManager.elasticSans()
.define(elasticSanName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new Sku().withName(SkuName.PREMIUM_LRS).withTier(SkuTier.PREMIUM))
.withBaseSizeTiB(1L)
.withExtendedCapacitySizeTiB(1L)
.create();
elasticSan.refresh();
Assertions.assertEquals(elasticSan.name(), elasticSanName);
Assertions.assertEquals(elasticSan.name(), elasticSanManager.elasticSans().getById(elasticSan.id()).name());
Assertions.assertTrue(elasticSanManager.elasticSans().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (elasticSan != null) {
elasticSanManager.elasticSans().deleteById(elasticSan.id());
}
}
} | class ElasticSanManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ElasticSanManager elasticSanManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
elasticSanManager = ElasticSanManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ElasticSanManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ElasticSanManager elasticSanManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
elasticSanManager = ElasticSanManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Delete the identity after this. Does imagebuilder require a user-assigned identity? Can system-assigned identity work? | public void testCreateImageTemplate() {
ImageTemplate imageTemplate = null;
try {
String randomPadding = randomPadding();
String templateName = "template" + randomPadding;
String imageName = "image" + randomPadding;
String identityName = "identity" + randomPadding;
String imageId = resourceManager.resourceGroups().getByName(resourceGroupName).id() + "/providers/Microsoft.Compute/images/" + imageName;
Identity identity = msiManager.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR)
.create();
Map<String, UserAssignedIdentity> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
imageTemplate = imageBuilderManager.virtualMachineImageTemplates()
.define(templateName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withIdentity(
new ImageTemplateIdentity()
.withType(ResourceIdentityType.USER_ASSIGNED)
.withUserAssignedIdentities(userAssignedIdentities))
.withDistribute(Arrays.asList(
new ImageTemplateManagedImageDistributor()
.withImageId(imageId)
.withLocation(REGION.name())
.withRunOutputName("runOutputManagedImage")
)
)
.withVmProfile(new ImageTemplateVmProfile().withVmSize("Standard_DS1_v2").withOsDiskSizeGB(32))
.withSource(
new ImageTemplatePlatformImageSource()
.withPublisher("canonical")
.withOffer("0001-com-ubuntu-server-focal")
.withSku("20_04-lts-gen2")
.withVersion("latest"))
.withBuildTimeoutInMinutes(0)
.create();
imageTemplate.refresh();
Assertions.assertEquals(imageTemplate.name(), templateName);
Assertions.assertEquals(imageTemplate.name(), imageBuilderManager.virtualMachineImageTemplates().getById(imageTemplate.id()).name());
Assertions.assertTrue(imageBuilderManager.virtualMachineImageTemplates().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (imageTemplate != null) {
imageBuilderManager.virtualMachineImageTemplates().deleteById(imageTemplate.id());
}
}
} | } | public void testCreateImageTemplate() {
ImageTemplate imageTemplate = null;
Identity identity = null;
try {
String randomPadding = randomPadding();
String templateName = "template" + randomPadding;
String imageName = "image" + randomPadding;
String identityName = "identity" + randomPadding;
String imageId = resourceManager.resourceGroups().getByName(resourceGroupName).id() + "/providers/Microsoft.Compute/images/" + imageName;
identity = msiManager.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR)
.create();
Map<String, UserAssignedIdentity> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
imageTemplate = imageBuilderManager.virtualMachineImageTemplates()
.define(templateName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withIdentity(
new ImageTemplateIdentity()
.withType(ResourceIdentityType.USER_ASSIGNED)
.withUserAssignedIdentities(userAssignedIdentities))
.withDistribute(Arrays.asList(
new ImageTemplateManagedImageDistributor()
.withImageId(imageId)
.withLocation(REGION.name())
.withRunOutputName("runOutputManagedImage")
)
)
.withVmProfile(new ImageTemplateVmProfile().withVmSize("Standard_DS1_v2").withOsDiskSizeGB(32))
.withSource(
new ImageTemplatePlatformImageSource()
.withPublisher("canonical")
.withOffer("0001-com-ubuntu-server-focal")
.withSku("20_04-lts-gen2")
.withVersion("latest"))
.withBuildTimeoutInMinutes(0)
.create();
imageTemplate.refresh();
Assertions.assertEquals(imageTemplate.name(), templateName);
Assertions.assertEquals(imageTemplate.name(), imageBuilderManager.virtualMachineImageTemplates().getById(imageTemplate.id()).name());
Assertions.assertTrue(imageBuilderManager.virtualMachineImageTemplates().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (imageTemplate != null) {
imageBuilderManager.virtualMachineImageTemplates().deleteById(imageTemplate.id());
}
if (identity != null) {
msiManager.identities().deleteById(identity.id());
}
}
} | class ImageBuilderManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ImageBuilderManager imageBuilderManager = null;
private MsiManager msiManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
imageBuilderManager = ImageBuilderManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
msiManager = MsiManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ImageBuilderManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ImageBuilderManager imageBuilderManager = null;
private MsiManager msiManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
imageBuilderManager = ImageBuilderManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
msiManager = MsiManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
1. Yes, user-assigned identity is required to use imagebuilder, system-assigned identity is not supported. 2. `Delete the identity after this.` has been fixed in the new version. | public void testCreateImageTemplate() {
ImageTemplate imageTemplate = null;
try {
String randomPadding = randomPadding();
String templateName = "template" + randomPadding;
String imageName = "image" + randomPadding;
String identityName = "identity" + randomPadding;
String imageId = resourceManager.resourceGroups().getByName(resourceGroupName).id() + "/providers/Microsoft.Compute/images/" + imageName;
Identity identity = msiManager.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR)
.create();
Map<String, UserAssignedIdentity> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
imageTemplate = imageBuilderManager.virtualMachineImageTemplates()
.define(templateName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withIdentity(
new ImageTemplateIdentity()
.withType(ResourceIdentityType.USER_ASSIGNED)
.withUserAssignedIdentities(userAssignedIdentities))
.withDistribute(Arrays.asList(
new ImageTemplateManagedImageDistributor()
.withImageId(imageId)
.withLocation(REGION.name())
.withRunOutputName("runOutputManagedImage")
)
)
.withVmProfile(new ImageTemplateVmProfile().withVmSize("Standard_DS1_v2").withOsDiskSizeGB(32))
.withSource(
new ImageTemplatePlatformImageSource()
.withPublisher("canonical")
.withOffer("0001-com-ubuntu-server-focal")
.withSku("20_04-lts-gen2")
.withVersion("latest"))
.withBuildTimeoutInMinutes(0)
.create();
imageTemplate.refresh();
Assertions.assertEquals(imageTemplate.name(), templateName);
Assertions.assertEquals(imageTemplate.name(), imageBuilderManager.virtualMachineImageTemplates().getById(imageTemplate.id()).name());
Assertions.assertTrue(imageBuilderManager.virtualMachineImageTemplates().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (imageTemplate != null) {
imageBuilderManager.virtualMachineImageTemplates().deleteById(imageTemplate.id());
}
}
} | } | public void testCreateImageTemplate() {
ImageTemplate imageTemplate = null;
Identity identity = null;
try {
String randomPadding = randomPadding();
String templateName = "template" + randomPadding;
String imageName = "image" + randomPadding;
String identityName = "identity" + randomPadding;
String imageId = resourceManager.resourceGroups().getByName(resourceGroupName).id() + "/providers/Microsoft.Compute/images/" + imageName;
identity = msiManager.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR)
.create();
Map<String, UserAssignedIdentity> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
imageTemplate = imageBuilderManager.virtualMachineImageTemplates()
.define(templateName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withIdentity(
new ImageTemplateIdentity()
.withType(ResourceIdentityType.USER_ASSIGNED)
.withUserAssignedIdentities(userAssignedIdentities))
.withDistribute(Arrays.asList(
new ImageTemplateManagedImageDistributor()
.withImageId(imageId)
.withLocation(REGION.name())
.withRunOutputName("runOutputManagedImage")
)
)
.withVmProfile(new ImageTemplateVmProfile().withVmSize("Standard_DS1_v2").withOsDiskSizeGB(32))
.withSource(
new ImageTemplatePlatformImageSource()
.withPublisher("canonical")
.withOffer("0001-com-ubuntu-server-focal")
.withSku("20_04-lts-gen2")
.withVersion("latest"))
.withBuildTimeoutInMinutes(0)
.create();
imageTemplate.refresh();
Assertions.assertEquals(imageTemplate.name(), templateName);
Assertions.assertEquals(imageTemplate.name(), imageBuilderManager.virtualMachineImageTemplates().getById(imageTemplate.id()).name());
Assertions.assertTrue(imageBuilderManager.virtualMachineImageTemplates().listByResourceGroup(resourceGroupName).stream().findAny().isPresent());
} finally {
if (imageTemplate != null) {
imageBuilderManager.virtualMachineImageTemplates().deleteById(imageTemplate.id());
}
if (identity != null) {
msiManager.identities().deleteById(identity.id());
}
}
} | class ImageBuilderManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ImageBuilderManager imageBuilderManager = null;
private MsiManager msiManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
imageBuilderManager = ImageBuilderManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
msiManager = MsiManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ImageBuilderManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ImageBuilderManager imageBuilderManager = null;
private MsiManager msiManager = null;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
imageBuilderManager = ImageBuilderManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
msiManager = MsiManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Let's extract this into it's own method as it's the same logic used above and both should be updated at the same time. | public PagedFlux<ConfigurationSetting> listConfigurationSettings(SettingSelector selector) {
final String keyFilter = selector == null ? null : selector.getKeyFilter();
final String labelFilter = selector == null ? null : selector.getLabelFilter();
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
final List<MatchConditions> matchConditionsList = selector == null ? null : selector.getMatchConditions();
AtomicInteger pageETagIndex = new AtomicInteger(1);
return new PagedFlux<>(
() -> withContext(context -> {
String firstPageETag = (matchConditionsList == null || matchConditionsList.isEmpty())
? null
: matchConditionsList.get(0).getIfNoneMatch();
return serviceClient.getKeyValuesSinglePageAsync(
keyFilter,
labelFilter,
null,
acceptDateTime,
settingFields,
null,
null,
firstPageETag,
addTracingNamespace(context))
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<PagedResponse<KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
if (httpResponse.getStatusCode() == 304) {
String continuationToken = parseNextLink(httpResponse.getHeaderValue("link"));
return Mono.just(
new PagedResponseBase<>(
httpResponse.getRequest(),
httpResponse.getStatusCode(),
httpResponse.getHeaders(),
null,
continuationToken,
null));
}
return Mono.error(throwable);
})
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse));
}),
nextLink -> withContext(context -> {
int pageETagListSize = (matchConditionsList == null || matchConditionsList.isEmpty())
? 0
: matchConditionsList.size();
String nextPageETag = null;
int pageETagIndexValue = pageETagIndex.get();
if (pageETagIndexValue < pageETagListSize) {
nextPageETag = matchConditionsList.get(pageETagIndexValue).getIfNoneMatch();
pageETagIndex.set(pageETagIndexValue + 1);
}
return serviceClient
.getKeyValuesNextSinglePageAsync(
nextLink,
acceptDateTime,
null,
nextPageETag,
addTracingNamespace(context))
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<PagedResponse<KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
String continuationToken = parseNextLink(httpResponse.getHeaderValue("link"));
if (httpResponse.getStatusCode() == 304) {
return Mono.just(
new PagedResponseBase<>(
httpResponse.getRequest(),
httpResponse.getStatusCode(),
httpResponse.getHeaders(),
null,
continuationToken,
null));
}
return Mono.error(throwable);
})
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse));
})
);
} | }) | public PagedFlux<ConfigurationSetting> listConfigurationSettings(SettingSelector selector) {
final String keyFilter = selector == null ? null : selector.getKeyFilter();
final String labelFilter = selector == null ? null : selector.getLabelFilter();
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
final List<MatchConditions> matchConditionsList = selector == null ? null : selector.getMatchConditions();
AtomicInteger pageETagIndex = new AtomicInteger(0);
return new PagedFlux<>(
() -> withContext(context -> serviceClient.getKeyValuesSinglePageAsync(
keyFilter,
labelFilter,
null,
acceptDateTime,
settingFields,
null,
null,
getPageETag(matchConditionsList, pageETagIndex),
addTracingNamespace(context))
.onErrorResume(HttpResponseException.class,
(Function<HttpResponseException, Mono<PagedResponse<KeyValue>>>) throwable ->
handleNotModifiedErrorToValidResponse(throwable))
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse))),
nextLink -> withContext(context -> serviceClient.getKeyValuesNextSinglePageAsync(
nextLink,
acceptDateTime,
null,
getPageETag(matchConditionsList, pageETagIndex),
addTracingNamespace(context))
.onErrorResume(HttpResponseException.class,
(Function<HttpResponseException, Mono<PagedResponse<KeyValue>>>) throwable ->
handleNotModifiedErrorToValidResponse(throwable))
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse)))
);
} | class ConfigurationAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(ConfigurationAsyncClient.class);
private final AzureAppConfigurationImpl serviceClient;
private final SyncTokenPolicy syncTokenPolicy;
final CreateSnapshotUtilClient createSnapshotUtilClient;
/**
* Creates a ConfigurationAsyncClient that sends requests to the configuration service at {@code serviceEndpoint}.
* Each service call goes through the {@code pipeline}.
*
* @param serviceClient The {@link AzureAppConfigurationImpl} that the client routes its request through.
* @param syncTokenPolicy {@link SyncTokenPolicy} to be used to update the external synchronization token to ensure
* service requests receive up-to-date values.
*/
ConfigurationAsyncClient(AzureAppConfigurationImpl serviceClient, SyncTokenPolicy syncTokenPolicy) {
this.serviceClient = serviceClient;
this.syncTokenPolicy = syncTokenPolicy;
this.createSnapshotUtilClient = new CreateSnapshotUtilClient(serviceClient);
}
/**
* Gets the service endpoint for the Azure App Configuration instance.
*
* @return the service endpoint for the Azure App Configuration instance.
*/
public String getEndpoint() {
return serviceClient.getEndpoint();
}
/**
* Adds a configuration value in the service if that key does not exist. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param key The key of the configuration setting to add.
* @param label The label of the configuration setting to add. If {@code null} no label will be used.
* @param value The value associated with this configuration setting key.
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(String key, String label, String value) {
return addConfigurationSetting(new ConfigurationSetting().setKey(key).setLabel(label).setValue(value));
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(ConfigurationSetting setting) {
return addConfigurationSettingWithResponse(setting).map(Response::getValue);
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
* <pre>
* client.addConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
*
* @param setting The setting to add based on its key and optional label combination.
* @return A REST response containing the {@link ConfigurationSetting} that was created, if a key collision occurs
* or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> addConfigurationSettingWithResponse(ConfigurationSetting setting) {
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal -> serviceClient.putKeyValueWithResponseAsync(settingInternal.getKey(),
settingInternal.getLabel(), null, ETAG_ANY, toKeyValue(settingInternal),
addTracingNamespace(context))
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Creates or updates a configuration value in the service with the given key. the {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param key The key of the configuration setting to create or update.
* @param label The label of the configuration setting to create or update, If {@code null} no label will be used.
* @param value The value of this configuration setting.
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(String key, String label, String value) {
return setConfigurationSetting(new ConfigurationSetting().setKey(key).setLabel(label).setValue(value));
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(ConfigurationSetting setting) {
return setConfigurationSettingWithResponse(setting, false).map(Response::getValue);
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* setting's ETag matches. If the ETag's value is equal to the wildcard character ({@code "*"}), the setting will
* always be updated.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
* <pre>
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* &
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the {@link ConfigurationSetting} that was created or updated, if the key is an
* invalid value, the setting is read-only, or an ETag was provided but does not match the service's current ETag
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setConfigurationSettingWithResponse(ConfigurationSetting setting,
boolean ifUnchanged) {
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal -> serviceClient.putKeyValueWithResponseAsync(settingInternal.getKey(),
settingInternal.getLabel(), getETag(ifUnchanged, settingInternal), null,
toKeyValue(settingInternal), addTracingNamespace(context))
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, and the optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label) {
return getConfigurationSetting(key, label, null);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, the optional {@code label}, and the optional
* {@code acceptDateTime} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* "prodDBConnection", "westUS", OffsetDateTime.now&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label, OffsetDateTime acceptDateTime) {
return getConfigurationSettingWithResponse(new ConfigurationSetting().setKey(key).setLabel(label),
acceptDateTime, false).map(Response::getValue);
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param setting The setting to retrieve.
*
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(ConfigurationSetting setting) {
return getConfigurationSettingWithResponse(setting, null, false).map(Response::getValue);
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
* <pre>
* client.getConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* null,
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
*
* @param setting The setting to retrieve.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @param ifChanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* If-None-Match header.
* @return A REST response containing the {@link ConfigurationSetting} stored in the service, or {@code null} if
* didn't exist. {@code null} is also returned if the configuration value does not exist or the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> getConfigurationSettingWithResponse(ConfigurationSetting setting,
OffsetDateTime acceptDateTime, boolean ifChanged) {
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal ->
serviceClient.getKeyValueWithResponseAsync(settingInternal.getKey(), settingInternal.getLabel(),
acceptDateTime == null ? null : acceptDateTime.toString(), null,
getETag(ifChanged, settingInternal), null, addTracingNamespace(context))
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<ResponseBase<GetKeyValueHeaders, KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
if (httpResponse.getStatusCode() == 304) {
return Mono.just(new ResponseBase<GetKeyValueHeaders, KeyValue>(
httpResponse.getRequest(), httpResponse.getStatusCode(),
httpResponse.getHeaders(), null, null));
}
return Mono.error(throwable);
})
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Deletes the ConfigurationSetting with a matching {@code key} and optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param key The key of configuration setting to delete.
* @param label The label of configuration setting to delete. If {@code null} no label will be used.
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(String key, String label) {
    // Wrap the key/label pair in a transient setting and delegate to the setting-based
    // overload; no ETag is supplied, so the delete is unconditional.
    final ConfigurationSetting settingToDelete = new ConfigurationSetting().setKey(key).setLabel(label);
    return deleteConfigurationSetting(settingToDelete);
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
*
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(ConfigurationSetting setting) {
    // Delegate to the with-response overload (unconditional delete) and unwrap the body.
    final Mono<Response<ConfigurationSetting>> responseMono =
        deleteConfigurationSettingWithResponse(setting, false);
    return responseMono.map(response -> response.getValue());
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key-label "prodDBConnection"-"westUS"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
* <pre>
* client.deleteConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
* is also returned if the {@link ConfigurationSetting
* {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> deleteConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged) {
    return withContext(context -> {
        final Context tracingContext = addTracingNamespace(context);
        return validateSettingAsync(setting).flatMap(validated -> {
            // An ETag is only sent when the caller asked for a conditional (if-unchanged) delete.
            final String etag = getETag(ifUnchanged, validated);
            return serviceClient
                .deleteKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(), etag, tracingContext)
                .map(response -> toConfigurationSettingWithResponse(response));
        });
    });
}
/**
* Sets the read-only status for the {@link ConfigurationSetting} that matches the {@code key}, the optional
* {@code label}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .contextWrite&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param key The key of configuration setting to set to be read-only.
* @param label The label of configuration setting to read-only. If {@code null} no label will be used.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(String key, String label, boolean isReadOnly) {
    // Build a transient setting from the key/label pair and hand off to the setting-based overload.
    final ConfigurationSetting target = new ConfigurationSetting().setKey(key).setLabel(label);
    return setReadOnly(target, isReadOnly);
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* false&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
*
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(ConfigurationSetting setting, boolean isReadOnly) {
    // Delegate to the with-response variant and strip the REST envelope.
    final Mono<Response<ConfigurationSetting>> responseMono = setReadOnlyWithResponse(setting, isReadOnly);
    return responseMono.map(response -> response.getValue());
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return A REST response containing the read-only or not read-only ConfigurationSetting if {@code isReadOnly}
* is true or null, or false respectively. Or return {@code null} if the setting didn't exist.
* {@code null} is also returned if the {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setReadOnlyWithResponse(ConfigurationSetting setting,
    boolean isReadOnly) {
    return withContext(context -> validateSettingAsync(setting).flatMap(validated -> {
        final String key = validated.getKey();
        final String label = validated.getLabel();
        final Context tracingContext = addTracingNamespace(context);
        if (isReadOnly) {
            // Placing a lock on the key/label puts the setting into a read-only state.
            return serviceClient.putLockWithResponseAsync(key, label, null, null, tracingContext)
                .map(response -> toConfigurationSettingWithResponse(response));
        }
        // Removing the lock clears the read-only state.
        return serviceClient.deleteLockWithResponseAsync(key, label, null, null, tracingContext)
            .map(response -> toConfigurationSettingWithResponse(response));
    }));
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
* <pre>
* client.listConfigurationSettings&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
* <pre>
* String snapshotName = "&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName) {
    // Delegate to the full overload with no field projection so the service returns
    // its default set of setting properties.
    final List<SettingFields> noFieldProjection = null;
    return listConfigurationSettingsForSnapshot(snapshotName, noFieldProjection);
}
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* List<SettingFields> fields = Arrays.asList&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @param fields Optional. The fields to select for the query response. If none are set, the service will return the
* ConfigurationSettings with a default set of properties.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName,
    List<SettingFields> fields) {
    return new PagedFlux<>(
        // First page: query key-values scoped to the snapshot name with the requested projection.
        () -> withContext(context -> serviceClient
            .getKeyValuesSinglePageAsync(null, null, null, null, fields, snapshotName, null, null,
                addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))),
        // Subsequent pages: follow the service-provided continuation link.
        continuationToken -> withContext(context -> serviceClient
            .getKeyValuesNextSinglePageAsync(continuationToken, null, null, null, addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))));
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
* Revisions expire after a period of time, see <a href="https:
* for more information.
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
* <pre>
* client.listRevisions&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @return Revisions of the ConfigurationSetting
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listRevisions(SettingSelector selector) {
    // A null selector means "all revisions, current state"; capture the filters up front so
    // both page suppliers below close over the same values.
    final boolean noSelector = selector == null;
    final String keyFilter = noSelector ? null : selector.getKeyFilter();
    final String labelFilter = noSelector ? null : selector.getLabelFilter();
    final String acceptDateTime = noSelector ? null : selector.getAcceptDateTime();
    final List<SettingFields> settingFields = noSelector ? null : toSettingFieldsList(selector.getFields());
    return new PagedFlux<>(
        () -> withContext(context -> serviceClient
            .getRevisionsSinglePageAsync(keyFilter, labelFilter, null, acceptDateTime, settingFields,
                addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))),
        continuationToken -> withContext(context -> serviceClient
            .getRevisionsNextSinglePageAsync(continuationToken, acceptDateTime, addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))));
}
/**
* Create a {@link ConfigurationSnapshot} by providing a snapshot name and a
* {@link ConfigurationSnapshot}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
* <pre>
* List<ConfigurationSettingsFilter> filters = new ArrayList<>&
* &
* filters.add&
* String snapshotName = "&
* client.beginCreateSnapshot&
* .setRetentionPeriod&
* .flatMap&
* .subscribe&
* snapshot -> System.out.printf&
* snapshot.getName&
* ex -> System.out.printf&
* ex.getMessage&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
*
* @param snapshotName The name of the {@link ConfigurationSnapshot} to create.
* @param snapshot The {@link ConfigurationSnapshot} to create.
* @return A {@link PollerFlux} that polls the creating snapshot operation until it has completed or
* has failed. The completed operation returns a {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<PollOperationDetails, ConfigurationSnapshot> beginCreateSnapshot(
    String snapshotName, ConfigurationSnapshot snapshot) {
    // The snapshot-creation long-running operation is implemented by the shared util client.
    final PollerFlux<PollOperationDetails, ConfigurationSnapshot> poller =
        createSnapshotUtilClient.beginCreateSnapshot(snapshotName, snapshot);
    return poller;
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.getSnapshot&
* getSnapshot -> &
* System.out.printf&
* getSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> getSnapshot(String snapshotName) {
    // No field projection: the service returns all snapshot properties; unwrap the REST envelope.
    final Mono<Response<ConfigurationSnapshot>> responseMono = getSnapshotWithResponse(snapshotName, null);
    return responseMono.map(response -> response.getValue());
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
*
* client.getSnapshotWithResponse&
* SnapshotFields.STATUS, SnapshotFields.FILTERS&
* .subscribe&
* response -> &
* ConfigurationSnapshot getSnapshot = response.getValue&
* &
* &
* System.out.printf&
* getSnapshot.getName&
* List<ConfigurationSettingsFilter> filters = getSnapshot.getFilters&
* for &
* System.out.printf&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param fields Used to select what fields are present in the returned resource(s).
* @return A {@link Mono} of {@link Response} of {@link ConfigurationSnapshot}.
*/
/**
 * Gets a {@link ConfigurationSnapshot} by snapshot name, selecting only the requested fields.
 *
 * @param snapshotName The snapshot name.
 * @param fields Used to select what fields are present in the returned resource(s); {@code null}
 * returns the service's default field set.
 * @return A {@link Mono} of {@link Response} of {@link ConfigurationSnapshot}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> getSnapshotWithResponse(String snapshotName,
    List<SnapshotFields> fields) {
    // Propagate the subscriber's Reactor context (tagged with the tracing namespace) like every
    // other service call in this client; the previous Context.NONE silently dropped any
    // caller-supplied context data (e.g. a parent tracing span).
    return withContext(context -> serviceClient
        .getSnapshotWithResponseAsync(snapshotName, null, null, fields, addTracingNamespace(context))
        .map(response -> new SimpleResponse<>(response, response.getValue())));
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.archiveSnapshot&
* archivedSnapshot -> &
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> archiveSnapshot(String snapshotName) {
    // Unconditional archive (no match conditions); unwrap the REST envelope.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.ARCHIVED, serviceClient)
        .map(response -> response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.archiveSnapshotWithResponse&
* .subscribe&
* response -> &
* ConfigurationSnapshot archivedSnapshot = response.getValue&
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> archiveSnapshotWithResponse(String snapshotName,
    MatchConditions matchConditions) {
    // Conditional archive: matchConditions (e.g. if-match ETag) is forwarded to the shared updater.
    final ConfigurationSnapshotStatus targetStatus = ConfigurationSnapshotStatus.ARCHIVED;
    return updateSnapshotAsync(snapshotName, matchConditions, targetStatus, serviceClient);
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.recoverSnapshot&
* recoveredSnapshot -> &
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> recoverSnapshot(String snapshotName) {
    // Unconditional recover (no match conditions); unwrap the REST envelope.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.READY, serviceClient)
        .map(response -> response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.recoverSnapshotWithResponse&
* response -> &
* ConfigurationSnapshot recoveredSnapshot = response.getValue&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> recoverSnapshotWithResponse(
    String snapshotName, MatchConditions matchConditions) {
    // Conditional recover: matchConditions (e.g. if-match ETag) is forwarded to the shared updater.
    final ConfigurationSnapshotStatus targetStatus = ConfigurationSnapshotStatus.READY;
    return updateSnapshotAsync(snapshotName, matchConditions, targetStatus, serviceClient);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
* <pre>
* String snapshotNameFilter = "&
* client.listSnapshots&
* .subscribe&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @return A {@link PagedFlux} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector) {
    try {
        return new PagedFlux<>(
            // First page: apply the optional name/fields/status filters from the selector.
            () -> withContext(context -> serviceClient.getSnapshotsSinglePageAsync(
                selector == null ? null : selector.getNameFilter(),
                null,
                selector == null ? null : selector.getFields(),
                selector == null ? null : selector.getStatus(),
                addTracingNamespace(context))),
            // Subsequent pages: follow the service-provided continuation link.
            continuationToken -> withContext(context ->
                serviceClient.getSnapshotsNextSinglePageAsync(continuationToken, addTracingNamespace(context))));
    } catch (RuntimeException runtimeException) {
        // Surface synchronous construction failures through the returned PagedFlux
        // instead of throwing at call time.
        return new PagedFlux<>(() -> monoError(LOGGER, runtimeException));
    }
}
/**
* Adds an external synchronization token to ensure service requests receive up-to-date values.
*
* @param token an external synchronization token to ensure service requests receive up-to-date values.
* @throws NullPointerException if the given token is null.
*/
/**
 * Adds an external synchronization token to ensure service requests receive up-to-date values.
 *
 * @param token an external synchronization token to ensure service requests receive up-to-date values.
 * @throws NullPointerException if the given token is null.
 */
public void updateSyncToken(String token) {
    // requireNonNull throws the documented NullPointerException before the policy is touched.
    syncTokenPolicy.updateSyncToken(Objects.requireNonNull(token, "'token' cannot be null."));
}
} | class ConfigurationAsyncClient {
// Shared logger for this client class.
private static final ClientLogger LOGGER = new ClientLogger(ConfigurationAsyncClient.class);
// Generated service layer every REST call is routed through.
private final AzureAppConfigurationImpl serviceClient;
// Pipeline policy updated via updateSyncToken(String) to keep reads consistent.
private final SyncTokenPolicy syncTokenPolicy;
// Package-visible helper implementing the beginCreateSnapshot long-running operation.
final CreateSnapshotUtilClient createSnapshotUtilClient;
/**
* Creates a ConfigurationAsyncClient that sends requests to the configuration service at {@code serviceEndpoint}.
* Each service call goes through the {@code pipeline}.
*
* @param serviceClient The {@link AzureAppConfigurationImpl} that the client routes its request through.
* @param syncTokenPolicy {@link SyncTokenPolicy} to be used to update the external synchronization token to ensure
* service requests receive up-to-date values.
*/
ConfigurationAsyncClient(AzureAppConfigurationImpl serviceClient, SyncTokenPolicy syncTokenPolicy) {
    // The snapshot-creation helper shares the same underlying service transport.
    this.createSnapshotUtilClient = new CreateSnapshotUtilClient(serviceClient);
    this.syncTokenPolicy = syncTokenPolicy;
    this.serviceClient = serviceClient;
}
/**
* Gets the service endpoint for the Azure App Configuration instance.
*
* @return the service endpoint for the Azure App Configuration instance.
*/
/**
 * Gets the service endpoint for the Azure App Configuration instance.
 *
 * @return the service endpoint for the Azure App Configuration instance.
 */
public String getEndpoint() {
    // The endpoint is owned by the generated service layer; simply surface it.
    final String endpoint = serviceClient.getEndpoint();
    return endpoint;
}
/**
* Adds a configuration value in the service if that key does not exist. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param key The key of the configuration setting to add.
* @param label The label of the configuration setting to add. If {@code null} no label will be used.
* @param value The value associated with this configuration setting key.
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(String key, String label, String value) {
    // Assemble a transient setting from the scalar arguments and delegate to the
    // setting-based overload.
    final ConfigurationSetting settingToAdd =
        new ConfigurationSetting().setKey(key).setLabel(label).setValue(value);
    return addConfigurationSetting(settingToAdd);
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(ConfigurationSetting setting) {
    // Delegate to the with-response variant and strip the REST envelope.
    final Mono<Response<ConfigurationSetting>> responseMono = addConfigurationSettingWithResponse(setting);
    return responseMono.map(response -> response.getValue());
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
* <pre>
* client.addConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
*
* @param setting The setting to add based on its key and optional label combination.
* @return A REST response containing the {@link ConfigurationSetting} that was created, if a key collision occurs
* or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> addConfigurationSettingWithResponse(ConfigurationSetting setting) {
    return withContext(context -> {
        final Context tracingContext = addTracingNamespace(context);
        // ETAG_ANY gives this PUT add-only semantics: the service rejects the request when the
        // key/label pair already exists (presumably via an if-none-match precondition —
        // NOTE(review): confirm against AzureAppConfigurationImpl's parameter order).
        return validateSettingAsync(setting).flatMap(validated ->
            serviceClient.putKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(), null, ETAG_ANY,
                toKeyValue(validated), tracingContext)
                .map(response -> toConfigurationSettingWithResponse(response)));
    });
}
/**
* Creates or updates a configuration value in the service with the given key. the {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param key The key of the configuration setting to create or update.
* @param label The label of the configuration setting to create or update, If {@code null} no label will be used.
* @param value The value of this configuration setting.
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(String key, String label, String value) {
    // Assemble a ConfigurationSetting from the individual pieces and reuse the setting-based overload.
    final ConfigurationSetting setting = new ConfigurationSetting()
        .setKey(key)
        .setLabel(label)
        .setValue(value);
    return setConfigurationSetting(setting);
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(ConfigurationSetting setting) {
    // Unconditional upsert (ifUnchanged = false) that strips the REST envelope from the result.
    return setConfigurationSettingWithResponse(setting, false).map(response -> response.getValue());
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
 * If {@link ConfigurationSetting#getETag() ETag} is specified, the configuration value is updated if the current
 * setting's ETag matches. If the ETag's value is equal to the wildcard character ({@code "*"}), the setting will
 * always be updated.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
* <pre>
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* &
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the {@link ConfigurationSetting} that was created or updated, if the key is an
* invalid value, the setting is read-only, or an ETag was provided but does not match the service's current ETag
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged) {
    // Validate the input, then upsert. When ifUnchanged is true the setting's ETag is sent as an
    // If-Match header, so the update only succeeds while the service-side ETag still matches.
    return withContext(context -> validateSettingAsync(setting)
        .flatMap(validated -> serviceClient
            .putKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(),
                getETag(ifUnchanged, validated), null, toKeyValue(validated), addTracingNamespace(context))
            .map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, and the optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label) {
    // No acceptDateTime — fetch the current state of the setting.
    return getConfigurationSetting(key, label, null);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, the optional {@code label}, and the optional
* {@code acceptDateTime} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* "prodDBConnection", "westUS", OffsetDateTime.now&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label, OffsetDateTime acceptDateTime) {
    // Build the lookup key/label pair and delegate; ifChanged is false so no If-None-Match is sent.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label);
    return getConfigurationSettingWithResponse(setting, acceptDateTime, false)
        .map(response -> response.getValue());
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param setting The setting to retrieve.
*
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(ConfigurationSetting setting) {
    // Current-state lookup (no acceptDateTime, no conditional headers), body only.
    return getConfigurationSettingWithResponse(setting, null, false).map(response -> response.getValue());
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
* <pre>
* client.getConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* null,
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
*
* @param setting The setting to retrieve.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @param ifChanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* If-None-Match header.
* @return A REST response containing the {@link ConfigurationSetting} stored in the service, or {@code null} if
* didn't exist. {@code null} is also returned if the configuration value does not exist or the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
// Retrieves a single setting; when ifChanged is true the setting's ETag is sent as an
// If-None-Match header (see the Javadoc above), making the read conditional.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> getConfigurationSettingWithResponse(ConfigurationSetting setting,
    OffsetDateTime acceptDateTime, boolean ifChanged) {
    return withContext(
        context -> validateSettingAsync(setting).flatMap(
            settingInternal ->
                // acceptDateTime, when non-null, requests a past (point-in-time) state of the setting.
                serviceClient.getKeyValueWithResponseAsync(settingInternal.getKey(), settingInternal.getLabel(),
                    acceptDateTime == null ? null : acceptDateTime.toString(), null,
                    getETag(ifChanged, settingInternal), null, addTracingNamespace(context))
                    // A 304 (Not Modified) surfaces here as an HttpResponseException. Translate it
                    // into a non-error response with a null body so callers get the headers/status
                    // instead of an exception. The explicit Function cast pins the generic result
                    // type for Reactor's type inference — do not remove it casually.
                    .onErrorResume(
                        HttpResponseException.class,
                        (Function<Throwable, Mono<ResponseBase<GetKeyValueHeaders, KeyValue>>>) throwable -> {
                            HttpResponseException e = (HttpResponseException) throwable;
                            HttpResponse httpResponse = e.getResponse();
                            if (httpResponse.getStatusCode() == 304) {
                                // Keep the original request, status and headers; carry no value.
                                return Mono.just(new ResponseBase<GetKeyValueHeaders, KeyValue>(
                                    httpResponse.getRequest(), httpResponse.getStatusCode(),
                                    httpResponse.getHeaders(), null, null));
                            }
                            // Any other HTTP failure is re-raised unchanged.
                            return Mono.error(throwable);
                        })
                    .map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Deletes the ConfigurationSetting with a matching {@code key} and optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param key The key of configuration setting to delete.
* @param label The label of configuration setting to delete. If {@code null} no label will be used.
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(String key, String label) {
    // Wrap the key/label pair in a ConfigurationSetting and reuse the setting-based overload.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label);
    return deleteConfigurationSetting(setting);
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
 * If {@link ConfigurationSetting#getETag() ETag} is specified and is not the wildcard character ({@code "*"}), then
 * the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
 * ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
*
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(ConfigurationSetting setting) {
    // Unconditional delete (ifUnchanged = false) returning only the deleted setting.
    return deleteConfigurationSettingWithResponse(setting, false).map(response -> response.getValue());
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
 * If {@link ConfigurationSetting#getETag() ETag} is specified and is not the wildcard character ({@code "*"}), then
 * the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
 * ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key-label "prodDBConnection"-"westUS"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
* <pre>
* client.deleteConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
* is also returned if the {@link ConfigurationSetting
* {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> deleteConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged) {
    // When ifUnchanged is true the setting's ETag is sent as an If-Match header, so the delete
    // only happens while the service-side setting is unchanged since the caller last read it.
    return withContext(context -> validateSettingAsync(setting)
        .flatMap(validated -> serviceClient
            .deleteKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(),
                getETag(ifUnchanged, validated), addTracingNamespace(context))
            .map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Sets the read-only status for the {@link ConfigurationSetting} that matches the {@code key}, the optional
* {@code label}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .contextWrite&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param key The key of configuration setting to set to be read-only.
* @param label The label of configuration setting to read-only. If {@code null} no label will be used.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(String key, String label, boolean isReadOnly) {
    // Wrap the key/label pair and reuse the setting-based overload.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label);
    return setReadOnly(setting, isReadOnly);
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* false&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
*
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(ConfigurationSetting setting, boolean isReadOnly) {
    // Delegate to the WithResponse overload and return only the updated setting.
    return setReadOnlyWithResponse(setting, isReadOnly).map(response -> response.getValue());
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return A REST response containing the read-only or not read-only ConfigurationSetting if {@code isReadOnly}
* is true or null, or false respectively. Or return {@code null} if the setting didn't exist.
* {@code null} is also returned if the {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setReadOnlyWithResponse(ConfigurationSetting setting,
    boolean isReadOnly) {
    // Putting a lock on the key/label marks the setting read-only; deleting the lock clears it.
    return withContext(context -> validateSettingAsync(setting).flatMap(validated -> {
        final String key = validated.getKey();
        final String label = validated.getLabel();
        final Context tracedContext = addTracingNamespace(context);
        if (isReadOnly) {
            return serviceClient.putLockWithResponseAsync(key, label, null, null, tracedContext)
                .map(response -> toConfigurationSettingWithResponse(response));
        }
        return serviceClient.deleteLockWithResponseAsync(key, label, null, null, tracedContext)
            .map(response -> toConfigurationSettingWithResponse(response));
    }));
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
* <pre>
* client.listConfigurationSettings&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
* <pre>
* String snapshotName = "&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName) {
    // No field projection requested — the service returns its default set of setting properties.
    return listConfigurationSettingsForSnapshot(snapshotName, null);
}
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* List<SettingFields> fields = Arrays.asList&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @param fields Optional. The fields to select for the query response. If none are set, the service will return the
* ConfigurationSettings with a default set of properties.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName,
    List<SettingFields> fields) {
    // First page: query key-values scoped to the named snapshot, projecting the requested fields.
    // Subsequent pages are fetched through the continuation link returned by the service.
    return new PagedFlux<>(
        () -> withContext(context -> serviceClient
            .getKeyValuesSinglePageAsync(null, null, null, null, fields, snapshotName, null, null,
                addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))),
        continuationToken -> withContext(context -> serviceClient
            .getKeyValuesNextSinglePageAsync(continuationToken, null, null, null,
                addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))));
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
* Revisions expire after a period of time, see <a href="https:
* for more information.
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
* <pre>
* client.listRevisions&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @return Revisions of the ConfigurationSetting
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listRevisions(SettingSelector selector) {
    // A null selector means "all revisions". Pull the individual filters out once so the
    // page-retrieval lambdas below capture plain final locals.
    final String keyFilter;
    final String labelFilter;
    final String acceptDateTime;
    final List<SettingFields> settingFields;
    if (selector == null) {
        keyFilter = null;
        labelFilter = null;
        acceptDateTime = null;
        settingFields = null;
    } else {
        keyFilter = selector.getKeyFilter();
        labelFilter = selector.getLabelFilter();
        acceptDateTime = selector.getAcceptDateTime();
        settingFields = toSettingFieldsList(selector.getFields());
    }
    return new PagedFlux<>(
        () -> withContext(context -> serviceClient
            .getRevisionsSinglePageAsync(keyFilter, labelFilter, null, acceptDateTime, settingFields,
                addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))),
        continuationToken -> withContext(context -> serviceClient
            .getRevisionsNextSinglePageAsync(continuationToken, acceptDateTime, addTracingNamespace(context))
            .map(page -> toConfigurationSettingWithPagedResponse(page))));
}
/**
* Create a {@link ConfigurationSnapshot} by providing a snapshot name and a
* {@link ConfigurationSnapshot}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
* <pre>
* List<ConfigurationSettingsFilter> filters = new ArrayList<>&
* &
* filters.add&
* String snapshotName = "&
* client.beginCreateSnapshot&
* .setRetentionPeriod&
* .flatMap&
* .subscribe&
* snapshot -> System.out.printf&
* snapshot.getName&
* ex -> System.out.printf&
* ex.getMessage&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
*
* @param snapshotName The name of the {@link ConfigurationSnapshot} to create.
* @param snapshot The {@link ConfigurationSnapshot} to create.
* @return A {@link PollerFlux} that polls the creating snapshot operation until it has completed or
* has failed. The completed operation returns a {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<PollOperationDetails, ConfigurationSnapshot> beginCreateSnapshot(
    String snapshotName, ConfigurationSnapshot snapshot) {
    // The long-running create workflow is implemented by the shared util client.
    return this.createSnapshotUtilClient.beginCreateSnapshot(snapshotName, snapshot);
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.getSnapshot&
* getSnapshot -> &
* System.out.printf&
* getSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> getSnapshot(String snapshotName) {
    // Fetch with no explicit field filter and strip the HTTP envelope.
    return getSnapshotWithResponse(snapshotName, null).map(response -> response.getValue());
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
*
* client.getSnapshotWithResponse&
* SnapshotFields.STATUS, SnapshotFields.FILTERS&
* .subscribe&
* response -> &
* ConfigurationSnapshot getSnapshot = response.getValue&
* &
* &
* System.out.printf&
* getSnapshot.getName&
* List<ConfigurationSettingsFilter> filters = getSnapshot.getFilters&
* for &
* System.out.printf&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param fields Used to select what fields are present in the returned resource(s).
* @return A {@link Mono} of {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> getSnapshotWithResponse(String snapshotName,
    List<SnapshotFields> fields) {
    // Consistency fix: every other service call in this client propagates the
    // subscriber's Reactor context via withContext(...) and tags it with the
    // tracing namespace. The previous implementation passed Context.NONE,
    // silently dropping caller-supplied context (e.g. tracing spans) for this
    // one call. Request shape and response mapping are unchanged.
    return withContext(context ->
        serviceClient.getSnapshotWithResponseAsync(snapshotName, null, null, fields,
            addTracingNamespace(context))
            .map(response -> new SimpleResponse<>(response, response.getValue())));
}
/**
* Updates a snapshot status from {@link ConfigurationSnapshotStatus#READY} to {@link ConfigurationSnapshotStatus#ARCHIVED}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.archiveSnapshot&
* archivedSnapshot -> &
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> archiveSnapshot(String snapshotName) {
    // Unconditional archive (no MatchConditions); unwrap the REST response.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.ARCHIVED, serviceClient)
        .map(response -> response.getValue());
}
/**
* Updates a snapshot status from {@link ConfigurationSnapshotStatus#READY} to {@link ConfigurationSnapshotStatus#ARCHIVED}.
*
* <p>
* To turn on the 'if-match' header, supply a second parameter 'matchConditions' carrying an ETag.
* It is used to perform the operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.archiveSnapshotWithResponse&
* .subscribe&
* response -> &
* ConfigurationSnapshot archivedSnapshot = response.getValue&
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> archiveSnapshotWithResponse(String snapshotName,
    MatchConditions matchConditions) {
    // ETag-guarded transition into the ARCHIVED status.
    final ConfigurationSnapshotStatus targetStatus = ConfigurationSnapshotStatus.ARCHIVED;
    return updateSnapshotAsync(snapshotName, matchConditions, targetStatus, serviceClient);
}
/**
* Updates a snapshot status from {@link ConfigurationSnapshotStatus#ARCHIVED} to {@link ConfigurationSnapshotStatus#READY}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.recoverSnapshot&
* recoveredSnapshot -> &
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> recoverSnapshot(String snapshotName) {
    // Unconditional recover (no MatchConditions); unwrap the REST response.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.READY, serviceClient)
        .map(response -> response.getValue());
}
/**
* Updates a snapshot status from {@link ConfigurationSnapshotStatus#ARCHIVED} to {@link ConfigurationSnapshotStatus#READY}.
*
* <p>
* To turn on the 'if-match' header, supply a second parameter 'matchConditions' carrying an ETag.
* It is used to perform the operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.recoverSnapshotWithResponse&
* response -> &
* ConfigurationSnapshot recoveredSnapshot = response.getValue&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> recoverSnapshotWithResponse(
    String snapshotName, MatchConditions matchConditions) {
    // ETag-guarded transition back to the READY status.
    final ConfigurationSnapshotStatus targetStatus = ConfigurationSnapshotStatus.READY;
    return updateSnapshotAsync(snapshotName, matchConditions, targetStatus, serviceClient);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
* <pre>
* String snapshotNameFilter = "&
* client.listSnapshots&
* .subscribe&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @return A {@link PagedFlux} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector) {
    // The selector is optional; every lookup below tolerates its absence.
    final boolean hasSelector = selector != null;
    try {
        return new PagedFlux<>(
            // First page.
            () -> withContext(context -> serviceClient.getSnapshotsSinglePageAsync(
                hasSelector ? selector.getNameFilter() : null,
                null,
                hasSelector ? selector.getFields() : null,
                hasSelector ? selector.getStatus() : null,
                addTracingNamespace(context))),
            // Continuation pages.
            nextLink -> withContext(context ->
                serviceClient.getSnapshotsNextSinglePageAsync(nextLink, addTracingNamespace(context))));
    } catch (RuntimeException ex) {
        // Surface synchronous construction failures through the returned flux
        // instead of throwing from this method.
        return new PagedFlux<>(() -> monoError(LOGGER, ex));
    }
}
/**
* Adds an external synchronization token to ensure service requests receive up-to-date values.
*
* @param token an external synchronization token to ensure service requests receive up-to-date values.
* @throws NullPointerException if the given token is null.
*/
public void updateSyncToken(String token) {
    // Fail fast on null (same message as before), then hand the token to the
    // pipeline policy so later requests observe the external state.
    syncTokenPolicy.updateSyncToken(Objects.requireNonNull(token, "'token' cannot be null."));
}
} |
Let's turn this into a method that takes the `AtomicInteger` and the list of `MatchConditions` and returns the ETag. This can be shared between the initial page retrieval and next page retrieval, will just need to start the AtomicInteger at 0. | public PagedFlux<ConfigurationSetting> listConfigurationSettings(SettingSelector selector) {
// Hoist all selector-derived request parameters; 'selector' may be null.
final String keyFilter = selector == null ? null : selector.getKeyFilter();
final String labelFilter = selector == null ? null : selector.getLabelFilter();
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
final List<MatchConditions> matchConditionsList = selector == null ? null : selector.getMatchConditions();
// Index of the next per-page ETag to consume from matchConditionsList. Starts
// at 1 because index 0 is read inline for the first page below.
AtomicInteger pageETagIndex = new AtomicInteger(1);
return new PagedFlux<>(
() -> withContext(context -> {
// First page: send the caller's first ETag (if any) as the conditional header.
String firstPageETag = (matchConditionsList == null || matchConditionsList.isEmpty())
? null
: matchConditionsList.get(0).getIfNoneMatch();
return serviceClient.getKeyValuesSinglePageAsync(
keyFilter,
labelFilter,
null,
acceptDateTime,
settingFields,
null,
null,
firstPageETag,
addTracingNamespace(context))
// A 304 (Not Modified) is not an error under conditional paging: surface
// it as a page with no items that still carries the continuation token
// parsed from the "link" header.
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<PagedResponse<KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
if (httpResponse.getStatusCode() == 304) {
String continuationToken = parseNextLink(httpResponse.getHeaderValue("link"));
return Mono.just(
new PagedResponseBase<>(
httpResponse.getRequest(),
httpResponse.getStatusCode(),
httpResponse.getHeaders(),
null,
continuationToken,
null));
}
// Any other HTTP failure propagates unchanged.
return Mono.error(throwable);
})
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse));
}),
nextLink -> withContext(context -> {
// Later pages: consume the next ETag from the list, if one remains.
int pageETagListSize = (matchConditionsList == null || matchConditionsList.isEmpty())
? 0
: matchConditionsList.size();
String nextPageETag = null;
int pageETagIndexValue = pageETagIndex.get();
if (pageETagIndexValue < pageETagListSize) {
nextPageETag = matchConditionsList.get(pageETagIndexValue).getIfNoneMatch();
pageETagIndex.set(pageETagIndexValue + 1);
}
return serviceClient
.getKeyValuesNextSinglePageAsync(
nextLink,
acceptDateTime,
null,
nextPageETag,
addTracingNamespace(context))
// Same 304 -> empty-page translation as for the first page.
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<PagedResponse<KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
String continuationToken = parseNextLink(httpResponse.getHeaderValue("link"));
if (httpResponse.getStatusCode() == 304) {
return Mono.just(
new PagedResponseBase<>(
httpResponse.getRequest(),
httpResponse.getStatusCode(),
httpResponse.getHeaders(),
null,
continuationToken,
null));
}
return Mono.error(throwable);
})
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse));
})
);
} | } | public PagedFlux<ConfigurationSetting> listConfigurationSettings(SettingSelector selector) {
// Hoist all selector-derived request parameters; 'selector' may be null.
final String keyFilter = selector == null ? null : selector.getKeyFilter();
final String labelFilter = selector == null ? null : selector.getLabelFilter();
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
final List<MatchConditions> matchConditionsList = selector == null ? null : selector.getMatchConditions();
// Shared cursor into matchConditionsList; getPageETag consumes one ETag per
// page request, starting at index 0 for the first page.
AtomicInteger pageETagIndex = new AtomicInteger(0);
return new PagedFlux<>(
() -> withContext(context -> serviceClient.getKeyValuesSinglePageAsync(
keyFilter,
labelFilter,
null,
acceptDateTime,
settingFields,
null,
null,
getPageETag(matchConditionsList, pageETagIndex),
addTracingNamespace(context))
// 304 (Not Modified) is expected under conditional paging; translate it
// into a valid (empty) page instead of an error.
.onErrorResume(HttpResponseException.class,
(Function<HttpResponseException, Mono<PagedResponse<KeyValue>>>) throwable ->
handleNotModifiedErrorToValidResponse(throwable))
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse))),
nextLink -> withContext(context -> serviceClient.getKeyValuesNextSinglePageAsync(
nextLink,
acceptDateTime,
null,
getPageETag(matchConditionsList, pageETagIndex),
addTracingNamespace(context))
// Same 304 handling as for the first page.
.onErrorResume(HttpResponseException.class,
(Function<HttpResponseException, Mono<PagedResponse<KeyValue>>>) throwable ->
handleNotModifiedErrorToValidResponse(throwable))
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse)))
);
} | class ConfigurationAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(ConfigurationAsyncClient.class);
private final AzureAppConfigurationImpl serviceClient;
private final SyncTokenPolicy syncTokenPolicy;
final CreateSnapshotUtilClient createSnapshotUtilClient;
/**
* Creates a ConfigurationAsyncClient that sends requests to the configuration service at {@code serviceEndpoint}.
* Each service call goes through the {@code pipeline}.
*
* @param serviceClient The {@link AzureAppConfigurationImpl} that the client routes its request through.
* @param syncTokenPolicy {@link SyncTokenPolicy} to be used to update the external synchronization token to ensure
* service requests receive up-to-date values.
*/
ConfigurationAsyncClient(AzureAppConfigurationImpl serviceClient, SyncTokenPolicy syncTokenPolicy) {
    // The snapshot util client wraps the same generated service client.
    this.createSnapshotUtilClient = new CreateSnapshotUtilClient(serviceClient);
    this.serviceClient = serviceClient;
    this.syncTokenPolicy = syncTokenPolicy;
}
/**
* Gets the service endpoint for the Azure App Configuration instance.
*
* @return the service endpoint for the Azure App Configuration instance.
*/
public String getEndpoint() {
    // The endpoint is owned by the generated service client.
    return this.serviceClient.getEndpoint();
}
/**
* Adds a configuration value in the service if that key does not exist. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param key The key of the configuration setting to add.
* @param label The label of the configuration setting to add. If {@code null} no label will be used.
* @param value The value associated with this configuration setting key.
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(String key, String label, String value) {
    // Assemble the setting and reuse the ConfigurationSetting overload.
    ConfigurationSetting newSetting = new ConfigurationSetting()
        .setKey(key)
        .setLabel(label)
        .setValue(value);
    return addConfigurationSetting(newSetting);
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(ConfigurationSetting setting) {
    // Unwrap the REST envelope from the WithResponse variant.
    return addConfigurationSettingWithResponse(setting).map(response -> response.getValue());
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
* <pre>
* client.addConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
*
* @param setting The setting to add based on its key and optional label combination.
* @return A REST response containing the {@link ConfigurationSetting} that was created, if a key collision occurs
* or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> addConfigurationSettingWithResponse(ConfigurationSetting setting) {
    // Validate first, then PUT with ETAG_ANY in the conditional slot, which
    // enforces the add-only behavior documented above (fails when the same
    // key/label pair already exists).
    return withContext(context -> validateSettingAsync(setting)
        .flatMap(validated -> serviceClient
            .putKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(), null, ETAG_ANY,
                toKeyValue(validated), addTracingNamespace(context))
            .map(serviceResponse -> toConfigurationSettingWithResponse(serviceResponse))));
}
/**
* Creates or updates a configuration value in the service with the given key. the {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param key The key of the configuration setting to create or update.
* @param label The label of the configuration setting to create or update, If {@code null} no label will be used.
* @param value The value of this configuration setting.
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(String key, String label, String value) {
    // Assemble the setting and reuse the ConfigurationSetting overload.
    ConfigurationSetting newSetting = new ConfigurationSetting()
        .setKey(key)
        .setLabel(label)
        .setValue(value);
    return setConfigurationSetting(newSetting);
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(ConfigurationSetting setting) {
    // Unconditional set (ifUnchanged = false); unwrap the REST envelope.
    return setConfigurationSettingWithResponse(setting, false).map(response -> response.getValue());
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* setting's ETag matches. If the ETag's value is equal to the wildcard character ({@code "*"}), the setting will
* always be updated.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
* <pre>
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* &
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the {@link ConfigurationSetting} that was created or updated, if the key is an
* invalid value, the setting is read-only, or an ETag was provided but does not match the service's current ETag
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged) {
    // Per the javadoc above, getETag(ifUnchanged, ...) supplies the setting's
    // ETag for the conditional (IF-MATCH) update only when the caller asked
    // for one; otherwise the write is unconditional.
    return withContext(context -> validateSettingAsync(setting)
        .flatMap(validated -> serviceClient
            .putKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(),
                getETag(ifUnchanged, validated), null, toKeyValue(validated),
                addTracingNamespace(context))
            .map(serviceResponse -> toConfigurationSettingWithResponse(serviceResponse))));
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, and the optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label) {
    // Delegate with a null acceptDateTime, i.e. read the current state.
    final OffsetDateTime noPointInTime = null;
    return getConfigurationSetting(key, label, noPointInTime);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, the optional {@code label}, and the optional
* {@code acceptDateTime} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* "prodDBConnection", "westUS", OffsetDateTime.now&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label, OffsetDateTime acceptDateTime) {
    // Build a key/label carrier and fetch unconditionally (ifChanged = false).
    ConfigurationSetting lookup = new ConfigurationSetting().setKey(key).setLabel(label);
    return getConfigurationSettingWithResponse(lookup, acceptDateTime, false)
        .map(response -> response.getValue());
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param setting The setting to retrieve.
*
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(ConfigurationSetting setting) {
    // Current state (null acceptDateTime), unconditional (ifChanged = false).
    return getConfigurationSettingWithResponse(setting, null, false).map(response -> response.getValue());
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
* <pre>
* client.getConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* null,
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
*
* @param setting The setting to retrieve.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @param ifChanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* If-None-Match header.
* @return A REST response containing the {@link ConfigurationSetting} stored in the service, or {@code null} if
* didn't exist. {@code null} is also returned if the configuration value does not exist or the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> getConfigurationSettingWithResponse(ConfigurationSetting setting,
OffsetDateTime acceptDateTime, boolean ifChanged) {
// Validate the setting, then issue the GET; getETag(ifChanged, ...) supplies
// the conditional (If-None-Match) ETag only when the caller requested it.
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal ->
serviceClient.getKeyValueWithResponseAsync(settingInternal.getKey(), settingInternal.getLabel(),
acceptDateTime == null ? null : acceptDateTime.toString(), null,
getETag(ifChanged, settingInternal), null, addTracingNamespace(context))
// A 304 (Not Modified) under the conditional request is not a failure:
// translate it into a response with a null body, keeping status/headers.
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<ResponseBase<GetKeyValueHeaders, KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
if (httpResponse.getStatusCode() == 304) {
return Mono.just(new ResponseBase<GetKeyValueHeaders, KeyValue>(
httpResponse.getRequest(), httpResponse.getStatusCode(),
httpResponse.getHeaders(), null, null));
}
// Any other HTTP failure propagates unchanged.
return Mono.error(throwable);
})
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Deletes the ConfigurationSetting with a matching {@code key} and optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param key The key of configuration setting to delete.
* @param label The label of configuration setting to delete. If {@code null} no label will be used.
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(String key, String label) {
    // Wrap the raw key/label pair into a ConfigurationSetting and reuse the setting-based overload.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label);
    return deleteConfigurationSetting(setting);
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
*
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(ConfigurationSetting setting) {
    // Unconditional delete (ifUnchanged = false); strip the REST envelope for the caller.
    return deleteConfigurationSettingWithResponse(setting, false)
        .map(response -> response.getValue());
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key-label "prodDBConnection"-"westUS"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
* <pre>
* client.deleteConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
* is also returned if the {@link ConfigurationSetting
* {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> deleteConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged) {
    // Validate first; only a valid setting reaches the service. When 'ifUnchanged' is requested the
    // setting's ETag is forwarded so the service deletes only an unmodified resource.
    return withContext(ctx -> validateSettingAsync(setting).flatMap(validated ->
        serviceClient
            .deleteKeyValueWithResponseAsync(validated.getKey(), validated.getLabel(),
                getETag(ifUnchanged, validated), addTracingNamespace(ctx))
            .map(serviceResponse -> toConfigurationSettingWithResponse(serviceResponse))));
}
/**
* Sets the read-only status for the {@link ConfigurationSetting} that matches the {@code key}, the optional
* {@code label}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .contextWrite&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param key The key of configuration setting to set to be read-only.
* @param label The label of configuration setting to read-only. If {@code null} no label will be used.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(String key, String label, boolean isReadOnly) {
    // Build a setting from the raw key/label pair and defer to the setting-based overload.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label);
    return setReadOnly(setting, isReadOnly);
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* false&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
*
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(ConfigurationSetting setting, boolean isReadOnly) {
    // Delegate to the *WithResponse overload and unwrap the REST envelope.
    return setReadOnlyWithResponse(setting, isReadOnly)
        .map(response -> response.getValue());
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return A REST response containing the read-only or not read-only ConfigurationSetting if {@code isReadOnly}
* is true or null, or false respectively. Or return {@code null} if the setting didn't exist.
* {@code null} is also returned if the {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setReadOnlyWithResponse(ConfigurationSetting setting,
    boolean isReadOnly) {
    // The read-only flag is modeled service-side as a "lock" resource: PUT creates the lock
    // (read-only on), DELETE removes it (read-only off).
    return withContext(ctx -> validateSettingAsync(setting).flatMap(
        validated -> {
            final Context tracedContext = addTracingNamespace(ctx);
            return (isReadOnly
                ? serviceClient.putLockWithResponseAsync(validated.getKey(), validated.getLabel(), null, null,
                    tracedContext)
                : serviceClient.deleteLockWithResponseAsync(validated.getKey(), validated.getLabel(), null, null,
                    tracedContext))
                .map(lockResponse -> toConfigurationSettingWithResponse(lockResponse));
        }));
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
* <pre>
* client.listConfigurationSettings&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
* <pre>
* String snapshotName = "&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName) {
    // No field projection requested: the service returns its default set of properties.
    final List<SettingFields> noFieldSelection = null;
    return listConfigurationSettingsForSnapshot(snapshotName, noFieldSelection);
}
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* List<SettingFields> fields = Arrays.asList&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @param fields Optional. The fields to select for the query response. If none are set, the service will return the
* ConfigurationSettings with a default set of properties.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName,
    List<SettingFields> fields) {
    // First lambda fetches the initial page scoped to 'snapshotName'; the second follows the
    // service's continuation link. Tracing metadata is attached to every request.
    // NOTE(review): the null arguments are unused filters of the generated client's
    // getKeyValuesSinglePageAsync overload — confirm their positions against AzureAppConfigurationImpl.
    return new PagedFlux<>(
        () -> withContext(
            context -> serviceClient.getKeyValuesSinglePageAsync(
                null,
                null,
                null,
                null,
                fields,
                snapshotName,
                null,
                null,
                addTracingNamespace(context))
                // Convert the generated KeyValue page into public ConfigurationSetting responses.
                .map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse))),
        nextLink -> withContext(
            context -> serviceClient.getKeyValuesNextSinglePageAsync(
                nextLink,
                null,
                null,
                null,
                addTracingNamespace(context))
                .map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse)))
    );
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
* Revisions expire after a period of time, see <a href="https:
* for more information.
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
* <pre>
* client.listRevisions&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @return Revisions of the ConfigurationSetting
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listRevisions(SettingSelector selector) {
    // Capture the selector's filters once, eagerly, so each page request (including continuation
    // pages) reuses identical filter values even if the caller mutates 'selector' afterwards.
    final String keyFilter = selector == null ? null : selector.getKeyFilter();
    final String labelFilter = selector == null ? null : selector.getLabelFilter();
    final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
    final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
    return new PagedFlux<>(
        () -> withContext(
            context -> serviceClient.getRevisionsSinglePageAsync(
                keyFilter,
                labelFilter,
                null,
                acceptDateTime,
                settingFields,
                addTracingNamespace(context))
                // Convert the generated KeyValue page into public ConfigurationSetting responses.
                .map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse))),
        nextLink -> withContext(
            context ->
                serviceClient.getRevisionsNextSinglePageAsync(nextLink, acceptDateTime,
                    addTracingNamespace(context))
                    .map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse))));
}
/**
* Create a {@link ConfigurationSnapshot} by providing a snapshot name and a
* {@link ConfigurationSnapshot}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
* <pre>
* List<ConfigurationSettingsFilter> filters = new ArrayList<>&
* &
* filters.add&
* String snapshotName = "&
* client.beginCreateSnapshot&
* .setRetentionPeriod&
* .flatMap&
* .subscribe&
* snapshot -> System.out.printf&
* snapshot.getName&
* ex -> System.out.printf&
* ex.getMessage&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
*
* @param snapshotName The name of the {@link ConfigurationSnapshot} to create.
* @param snapshot The {@link ConfigurationSnapshot} to create.
* @return A {@link PollerFlux} that polls the creating snapshot operation until it has completed or
* has failed. The completed operation returns a {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<PollOperationDetails, ConfigurationSnapshot> beginCreateSnapshot(
    String snapshotName, ConfigurationSnapshot snapshot) {
    // All long-running-operation mechanics (polling, status mapping) live in the shared util client.
    final PollerFlux<PollOperationDetails, ConfigurationSnapshot> poller =
        createSnapshotUtilClient.beginCreateSnapshot(snapshotName, snapshot);
    return poller;
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.getSnapshot&
* getSnapshot -> &
* System.out.printf&
* getSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> getSnapshot(String snapshotName) {
    // Fetch with no field projection and strip the REST envelope.
    return getSnapshotWithResponse(snapshotName, null)
        .map(response -> response.getValue());
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
*
* client.getSnapshotWithResponse&
* SnapshotFields.STATUS, SnapshotFields.FILTERS&
* .subscribe&
* response -> &
* ConfigurationSnapshot getSnapshot = response.getValue&
* &
* &
* System.out.printf&
* getSnapshot.getName&
* List<ConfigurationSettingsFilter> filters = getSnapshot.getFilters&
* for &
* System.out.printf&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param fields Used to select what fields are present in the returned resource(s).
* @return A {@link Mono} of {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> getSnapshotWithResponse(String snapshotName,
    List<SnapshotFields> fields) {
    // Consistency fix: every other service call in this client captures the Reactor subscriber
    // context via withContext(...) and tags it with addTracingNamespace(...). The previous
    // hard-coded Context.NONE silently dropped caller-supplied tracing metadata.
    return withContext(
        context -> serviceClient.getSnapshotWithResponseAsync(snapshotName, null, null, fields,
            addTracingNamespace(context))
            // Re-wrap so the public type is Response<ConfigurationSnapshot> without generated headers.
            .map(response -> new SimpleResponse<>(response, response.getValue())));
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.archiveSnapshot&
* archivedSnapshot -> &
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> archiveSnapshot(String snapshotName) {
    // Archive without ETag preconditions (no MatchConditions) and strip the REST envelope.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.ARCHIVED, serviceClient)
        .map(response -> response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.archiveSnapshotWithResponse&
* .subscribe&
* response -> &
* ConfigurationSnapshot archivedSnapshot = response.getValue&
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> archiveSnapshotWithResponse(String snapshotName,
    MatchConditions matchConditions) {
    // Conditional archive: 'matchConditions' (ETag) makes the service fail with 412 on a stale snapshot.
    final Mono<Response<ConfigurationSnapshot>> update =
        updateSnapshotAsync(snapshotName, matchConditions, ConfigurationSnapshotStatus.ARCHIVED, serviceClient);
    return update;
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.recoverSnapshot&
* recoveredSnapshot -> &
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> recoverSnapshot(String snapshotName) {
    // Recover (status -> READY) without ETag preconditions and strip the REST envelope.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.READY, serviceClient)
        .map(response -> response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.recoverSnapshotWithResponse&
* response -> &
* ConfigurationSnapshot recoveredSnapshot = response.getValue&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> recoverSnapshotWithResponse(
    String snapshotName, MatchConditions matchConditions) {
    // Conditional recover: 'matchConditions' (ETag) makes the service fail with 412 on a stale snapshot.
    final Mono<Response<ConfigurationSnapshot>> update =
        updateSnapshotAsync(snapshotName, matchConditions, ConfigurationSnapshotStatus.READY, serviceClient);
    return update;
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
* <pre>
* String snapshotNameFilter = "&
* client.listSnapshots&
* .subscribe&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @return A {@link PagedFlux} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector) {
    try {
        // First lambda fetches the initial page with the selector's filters; the second follows
        // the service's continuation link. Filters are re-read from 'selector' per page request.
        return new PagedFlux<>(
            () -> withContext(ctx -> serviceClient.getSnapshotsSinglePageAsync(
                selector == null ? null : selector.getNameFilter(),
                null,
                selector == null ? null : selector.getFields(),
                selector == null ? null : selector.getStatus(),
                addTracingNamespace(ctx))),
            continuationToken -> withContext(ctx ->
                serviceClient.getSnapshotsNextSinglePageAsync(continuationToken, addTracingNamespace(ctx))));
    } catch (RuntimeException error) {
        // Surface synchronous setup failures through the reactive stream instead of throwing.
        return new PagedFlux<>(() -> monoError(LOGGER, error));
    }
}
/**
* Adds an external synchronization token to ensure service requests receive up-to-date values.
*
* @param token an external synchronization token to ensure service requests receive up-to-date values.
* @throws NullPointerException if the given token is null.
*/
public void updateSyncToken(String token) {
    // Fail fast with the same NullPointerException/message Objects.requireNonNull would produce;
    // a silently-missing token would break read-your-writes consistency.
    if (token == null) {
        throw new NullPointerException("'token' cannot be null.");
    }
    syncTokenPolicy.updateSyncToken(token);
}
} | class ConfigurationAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(ConfigurationAsyncClient.class);
// Generated low-level client that performs the actual HTTP calls.
private final AzureAppConfigurationImpl serviceClient;
// Pipeline policy holding the external synchronization token; updated via updateSyncToken(String).
private final SyncTokenPolicy syncTokenPolicy;
// Package-private helper that drives the long-running snapshot-creation operation.
final CreateSnapshotUtilClient createSnapshotUtilClient;
/**
* Creates a ConfigurationAsyncClient that sends requests to the configuration service at {@code serviceEndpoint}.
* Each service call goes through the {@code pipeline}.
*
* @param serviceClient The {@link AzureAppConfigurationImpl} that the client routes its request through.
* @param syncTokenPolicy {@link SyncTokenPolicy} to be used to update the external synchronization token to ensure
* service requests receive up-to-date values.
*/
ConfigurationAsyncClient(AzureAppConfigurationImpl serviceClient, SyncTokenPolicy syncTokenPolicy) {
    this.serviceClient = serviceClient;
    this.syncTokenPolicy = syncTokenPolicy;
    // The snapshot util client reuses the same generated client (and thus the same pipeline).
    this.createSnapshotUtilClient = new CreateSnapshotUtilClient(serviceClient);
}
/**
* Gets the service endpoint for the Azure App Configuration instance.
*
* @return the service endpoint for the Azure App Configuration instance.
*/
public String getEndpoint() {
    // The endpoint is owned by the generated client; simply surface it.
    final String endpoint = serviceClient.getEndpoint();
    return endpoint;
}
/**
* Adds a configuration value in the service if that key does not exist. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param key The key of the configuration setting to add.
* @param label The label of the configuration setting to add. If {@code null} no label will be used.
* @param value The value associated with this configuration setting key.
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(String key, String label, String value) {
    // Assemble a setting from the individual pieces and defer to the setting-based overload.
    final ConfigurationSetting setting = new ConfigurationSetting()
        .setKey(key)
        .setLabel(label)
        .setValue(value);
    return addConfigurationSetting(setting);
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
* <pre>
* client.addConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> addConfigurationSetting(ConfigurationSetting setting) {
return addConfigurationSettingWithResponse(setting).map(Response::getValue);
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
* <pre>
* client.addConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.addConfigurationSettingWithResponse
*
* @param setting The setting to add based on its key and optional label combination.
* @return A REST response containing the {@link ConfigurationSetting} that was created, if a key collision occurs
* or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
     * @throws HttpResponseException If {@link ConfigurationSetting#getKey() key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> addConfigurationSettingWithResponse(ConfigurationSetting setting) {
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal -> serviceClient.putKeyValueWithResponseAsync(settingInternal.getKey(),
settingInternal.getLabel(), null, ETAG_ANY, toKeyValue(settingInternal),
addTracingNamespace(context))
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Creates or updates a configuration value in the service with the given key. the {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param key The key of the configuration setting to create or update.
* @param label The label of the configuration setting to create or update, If {@code null} no label will be used.
* @param value The value of this configuration setting.
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(String key, String label, String value) {
return setConfigurationSetting(new ConfigurationSetting().setKey(key).setLabel(label).setValue(value));
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection"</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
* <pre>
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* &
* client.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created or updated, or an empty Mono if the key is an invalid
* value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setConfigurationSetting(ConfigurationSetting setting) {
return setConfigurationSettingWithResponse(setting, false).map(Response::getValue);
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* setting's ETag matches. If the ETag's value is equal to the wildcard character ({@code "*"}), the setting will
* always be updated.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
* <pre>
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* &
* client.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false&
* .subscribe&
* final ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setConfigurationSettingWithResponse
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the {@link ConfigurationSetting} that was created or updated, if the key is an
* invalid value, the setting is read-only, or an ETag was provided but does not match the service's current ETag
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If the {@link ConfigurationSetting#getETag() ETag} was specified, is not the
     * wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
     * read-only.
     * @throws HttpResponseException If {@link ConfigurationSetting#getKey() key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setConfigurationSettingWithResponse(ConfigurationSetting setting,
boolean ifUnchanged) {
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal -> serviceClient.putKeyValueWithResponseAsync(settingInternal.getKey(),
settingInternal.getLabel(), getETag(ifUnchanged, settingInternal), null,
toKeyValue(settingInternal), addTracingNamespace(context))
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, and the optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label) {
return getConfigurationSetting(key, label, null);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, the optional {@code label}, and the optional
* {@code acceptDateTime} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* "prodDBConnection", "westUS", OffsetDateTime.now&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(String key, String label, OffsetDateTime acceptDateTime) {
return getConfigurationSettingWithResponse(new ConfigurationSetting().setKey(key).setLabel(label),
acceptDateTime, false).map(Response::getValue);
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection" and a time that one minute before now at UTC-Zone</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
* <pre>
* client.getConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSetting
*
* @param setting The setting to retrieve.
*
* @return The {@link ConfigurationSetting} stored in the service, or an empty Mono if the configuration value does
* not exist or the key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> getConfigurationSetting(ConfigurationSetting setting) {
return getConfigurationSettingWithResponse(setting, null, false).map(Response::getValue);
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
* <pre>
* client.getConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* null,
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getConfigurationSettingWithResponse
*
* @param setting The setting to retrieve.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @param ifChanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* If-None-Match header.
* @return A REST response containing the {@link ConfigurationSetting} stored in the service, or {@code null} if
* didn't exist. {@code null} is also returned if the configuration value does not exist or the key is an invalid
* value (which will also throw HttpResponseException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> getConfigurationSettingWithResponse(ConfigurationSetting setting,
OffsetDateTime acceptDateTime, boolean ifChanged) {
return withContext(
context -> validateSettingAsync(setting).flatMap(
settingInternal ->
serviceClient.getKeyValueWithResponseAsync(settingInternal.getKey(), settingInternal.getLabel(),
acceptDateTime == null ? null : acceptDateTime.toString(), null,
getETag(ifChanged, settingInternal), null, addTracingNamespace(context))
.onErrorResume(
HttpResponseException.class,
(Function<Throwable, Mono<ResponseBase<GetKeyValueHeaders, KeyValue>>>) throwable -> {
HttpResponseException e = (HttpResponseException) throwable;
HttpResponse httpResponse = e.getResponse();
if (httpResponse.getStatusCode() == 304) {
return Mono.just(new ResponseBase<GetKeyValueHeaders, KeyValue>(
httpResponse.getRequest(), httpResponse.getStatusCode(),
httpResponse.getHeaders(), null, null));
}
return Mono.error(throwable);
})
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Deletes the ConfigurationSetting with a matching {@code key} and optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param key The key of configuration setting to delete.
* @param label The label of configuration setting to delete. If {@code null} no label will be used.
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(String key, String label) {
return deleteConfigurationSetting(new ConfigurationSetting().setKey(key).setLabel(label));
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
* <pre>
* client.deleteConfigurationSetting&
* .setKey&
* .setLabel&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSetting
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
*
* @return The deleted ConfigurationSetting or an empty Mono is also returned if the {@code key} is an invalid value
* (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> deleteConfigurationSetting(ConfigurationSetting setting) {
return deleteConfigurationSettingWithResponse(setting, false).map(Response::getValue);
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key-label "prodDBConnection"-"westUS"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
* <pre>
* client.deleteConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting responseSetting = response.getValue&
* System.out.printf&
* responseSetting.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.deleteConfigurationSettingWithResponse
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
* is also returned if the {@link ConfigurationSetting
* {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> deleteConfigurationSettingWithResponse(ConfigurationSetting setting,
boolean ifUnchanged) {
return withContext(context -> validateSettingAsync(setting).flatMap(
settingInternal -> serviceClient.deleteKeyValueWithResponseAsync(settingInternal.getKey(),
settingInternal.getLabel(), getETag(ifUnchanged, settingInternal), addTracingNamespace(context))
.map(response -> toConfigurationSettingWithResponse(response))));
}
/**
* Sets the read-only status for the {@link ConfigurationSetting} that matches the {@code key}, the optional
* {@code label}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .contextWrite&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param key The key of configuration setting to set to be read-only.
* @param label The label of configuration setting to read-only. If {@code null} no label will be used.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(String key, String label, boolean isReadOnly) {
return setReadOnly(new ConfigurationSetting().setKey(key).setLabel(label), isReadOnly);
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* response.getKey&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
* <pre>
* client.setReadOnly&
* .setKey&
* .setLabel&
* false&
* .subscribe&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnly
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
*
* @return The {@link ConfigurationSetting} that is read-only, or an empty Mono if a key collision occurs or the
* key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSetting> setReadOnly(ConfigurationSetting setting, boolean isReadOnly) {
return setReadOnlyWithResponse(setting, isReadOnly).map(Response::getValue);
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* true&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* result.getKey&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
* <pre>
* client.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* false&
* .contextWrite&
* .subscribe&
* ConfigurationSetting result = response.getValue&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.setReadOnlyWithResponse
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return A REST response containing the read-only or not read-only ConfigurationSetting if {@code isReadOnly}
* is true or null, or false respectively. Or return {@code null} if the setting didn't exist.
* {@code null} is also returned if the {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSetting>> setReadOnlyWithResponse(ConfigurationSetting setting,
boolean isReadOnly) {
return withContext(context -> validateSettingAsync(setting).flatMap(
settingInternal -> {
final String key = settingInternal.getKey();
final String label = settingInternal.getLabel();
final Context contextInternal = addTracingNamespace(context);
return (isReadOnly
? serviceClient.putLockWithResponseAsync(key, label, null, null, contextInternal)
: serviceClient.deleteLockWithResponseAsync(key, label, null, null, contextInternal))
.map(response -> toConfigurationSettingWithResponse(response));
}));
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
* <pre>
* client.listConfigurationSettings&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettings -->
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
* <pre>
* String snapshotName = "&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshot -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName) {
return listConfigurationSettingsForSnapshot(snapshotName, null);
}
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their
* current values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* List<SettingFields> fields = Arrays.asList&
* client.listConfigurationSettingsForSnapshot&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listConfigurationSettingsForSnapshotMaxOverload -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @param fields Optional. The fields to select for the query response. If none are set, the service will return the
* ConfigurationSettings with a default set of properties.
* @return A Flux of ConfigurationSettings that matches the {@code selector}. If no options were provided, the Flux
* contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName,
List<SettingFields> fields) {
return new PagedFlux<>(
() -> withContext(
context -> serviceClient.getKeyValuesSinglePageAsync(
null,
null,
null,
null,
fields,
snapshotName,
null,
null,
addTracingNamespace(context))
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse))),
nextLink -> withContext(
context -> serviceClient.getKeyValuesNextSinglePageAsync(
nextLink,
null,
null,
null,
addTracingNamespace(context))
.map(pagedResponse -> toConfigurationSettingWithPagedResponse(pagedResponse)))
);
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
     * in descending order from their {@link ConfigurationSetting#getLastModified() lastModified} date.
     * Revisions expire after a period of time, see
     * <a href="https://azure.microsoft.com/pricing/details/app-configuration/">Pricing</a> for more information.
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
* <pre>
* client.listRevisions&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions -->
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @return Revisions of the ConfigurationSetting
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSetting> listRevisions(SettingSelector selector) {
    // Capture the selector parts as effectively-final locals so the page retrievers below can use them.
    final String keyFilter = selector == null ? null : selector.getKeyFilter();
    final String labelFilter = selector == null ? null : selector.getLabelFilter();
    final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
    final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
    return new PagedFlux<>(
        () -> withContext(ctx -> serviceClient
            .getRevisionsSinglePageAsync(keyFilter, labelFilter, null, acceptDateTime, settingFields,
                addTracingNamespace(ctx))
            .map(page -> toConfigurationSettingWithPagedResponse(page))),
        nextLink -> withContext(ctx -> serviceClient
            .getRevisionsNextSinglePageAsync(nextLink, acceptDateTime, addTracingNamespace(ctx))
            .map(page -> toConfigurationSettingWithPagedResponse(page))));
}
/**
* Create a {@link ConfigurationSnapshot} by providing a snapshot name and a
* {@link ConfigurationSnapshot}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
* <pre>
* List<ConfigurationSettingsFilter> filters = new ArrayList<>&
* &
* filters.add&
* String snapshotName = "&
* client.beginCreateSnapshot&
* .setRetentionPeriod&
* .flatMap&
* .subscribe&
* snapshot -> System.out.printf&
* snapshot.getName&
* ex -> System.out.printf&
* ex.getMessage&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.beginCreateSnapshotMaxOverload -->
*
* @param snapshotName The name of the {@link ConfigurationSnapshot} to create.
* @param snapshot The {@link ConfigurationSnapshot} to create.
* @return A {@link PollerFlux} that polls the creating snapshot operation until it has completed or
* has failed. The completed operation returns a {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<PollOperationDetails, ConfigurationSnapshot> beginCreateSnapshot(
    String snapshotName, ConfigurationSnapshot snapshot) {
    // The long-running create/poll workflow lives entirely in the shared snapshot utility client.
    return createSnapshotUtilClient.beginCreateSnapshot(snapshotName, snapshot);
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.getSnapshot&
* getSnapshot -> &
* System.out.printf&
* getSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> getSnapshot(String snapshotName) {
    // Convenience overload: fetch with default fields, then unwrap the REST envelope.
    Mono<Response<ConfigurationSnapshot>> withResponse = getSnapshotWithResponse(snapshotName, null);
    return withResponse.map(response -> response.getValue());
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
*
* client.getSnapshotWithResponse&
* SnapshotFields.STATUS, SnapshotFields.FILTERS&
* .subscribe&
* response -> &
* ConfigurationSnapshot getSnapshot = response.getValue&
* &
* &
* System.out.printf&
* getSnapshot.getName&
* List<ConfigurationSettingsFilter> filters = getSnapshot.getFilters&
* for &
* System.out.printf&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.getSnapshotByNameMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param fields Used to select what fields are present in the returned resource(s).
* @return A {@link Mono} of {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> getSnapshotWithResponse(String snapshotName,
    List<SnapshotFields> fields) {
    // Defer to subscription time and propagate the subscriber context, consistent with every other
    // public method of this async client. The previous implementation passed Context.NONE directly,
    // which dropped the caller's reactor context (and with it the tracing namespace metadata).
    return withContext(context ->
        serviceClient.getSnapshotWithResponseAsync(snapshotName, null, null, fields,
            addTracingNamespace(context))
            .map(response -> new SimpleResponse<>(response, response.getValue())));
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.archiveSnapshot&
* archivedSnapshot -> &
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> archiveSnapshot(String snapshotName) {
    // Unconditional archive: no match conditions, so no If-Match header is sent.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.ARCHIVED, serviceClient)
        .map(response -> response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.archiveSnapshotWithResponse&
* .subscribe&
* response -> &
* ConfigurationSnapshot archivedSnapshot = response.getValue&
* System.out.printf&
* archivedSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.archiveSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> archiveSnapshotWithResponse(String snapshotName,
    MatchConditions matchConditions) {
    // Conditional archive: a non-null matchConditions makes the update conditional on the
    // snapshot's current ETag (412 Precondition Failed on mismatch, per the javadoc above).
    return updateSnapshotAsync(snapshotName, matchConditions, ConfigurationSnapshotStatus.ARCHIVED, serviceClient);
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
* <pre>
* String snapshotName = "&
* client.recoverSnapshot&
* recoveredSnapshot -> &
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotByName -->
*
* @param snapshotName The snapshot name.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ConfigurationSnapshot> recoverSnapshot(String snapshotName) {
    // Unconditional recover back to READY: no match conditions, so no If-Match header is sent.
    return updateSnapshotAsync(snapshotName, null, ConfigurationSnapshotStatus.READY, serviceClient)
        .map(response -> response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* client.recoverSnapshotWithResponse&
* response -> &
* ConfigurationSnapshot recoveredSnapshot = response.getValue&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.recoverSnapshotMaxOverload -->
*
* @param snapshotName The snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @return A {@link Mono} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ConfigurationSnapshot>> recoverSnapshotWithResponse(
    String snapshotName, MatchConditions matchConditions) {
    // Conditional recover to READY: a non-null matchConditions makes the update conditional on
    // the snapshot's current ETag (412 Precondition Failed on mismatch, per the javadoc above).
    return updateSnapshotAsync(snapshotName, matchConditions, ConfigurationSnapshotStatus.READY, serviceClient);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
* <pre>
* String snapshotNameFilter = "&
* client.listSnapshots&
* .subscribe&
* System.out.printf&
* recoveredSnapshot.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationasyncclient.listSnapshots -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @return A {@link PagedFlux} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector) {
    try {
        return new PagedFlux<>(
            () -> withContext(ctx -> serviceClient.getSnapshotsSinglePageAsync(
                selector == null ? null : selector.getNameFilter(),
                null,
                selector == null ? null : selector.getFields(),
                selector == null ? null : selector.getStatus(),
                addTracingNamespace(ctx))),
            nextLink -> withContext(ctx ->
                serviceClient.getSnapshotsNextSinglePageAsync(nextLink, addTracingNamespace(ctx))));
    } catch (RuntimeException runtimeException) {
        // Surface synchronous construction failures through the returned flux instead of throwing.
        return new PagedFlux<>(() -> monoError(LOGGER, runtimeException));
    }
}
/**
* Adds an external synchronization token to ensure service requests receive up-to-date values.
*
* @param token an external synchronization token to ensure service requests receive up-to-date values.
* @throws NullPointerException if the given token is null.
*/
public void updateSyncToken(String token) {
    // Validate eagerly so a null token fails here, not on some later service request.
    Objects.requireNonNull(token, "'token' cannot be null.");
    syncTokenPolicy.updateSyncToken(token);
}
} |
Same comments as the asynchronous client | public PagedIterable<ConfigurationSetting> listConfigurationSettings(SettingSelector selector, Context context) {
final String keyFilter = selector == null ? null : selector.getKeyFilter();
final String labelFilter = selector == null ? null : selector.getLabelFilter();
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
final List<MatchConditions> matchConditionsList = selector == null ? null : selector.getMatchConditions();
AtomicInteger pageETagIndex = new AtomicInteger(1);
return new PagedIterable<>(
() -> {
String firstPageETag = (matchConditionsList == null || matchConditionsList.isEmpty())
? null
: matchConditionsList.get(0).getIfNoneMatch();
PagedResponse<KeyValue> pagedResponse;
try {
pagedResponse = serviceClient.getKeyValuesSinglePage(
keyFilter,
labelFilter,
null,
acceptDateTime,
settingFields,
null,
null,
firstPageETag,
enableSyncRestProxy(addTracingNamespace(context)));
} catch (HttpResponseException ex) {
final HttpResponse httpResponse = ex.getResponse();
if (httpResponse.getStatusCode() == 304) {
String continuationToken = parseNextLink(httpResponse.getHeaderValue("link"));
return new PagedResponseBase<>(
httpResponse.getRequest(),
httpResponse.getStatusCode(),
httpResponse.getHeaders(),
null,
continuationToken,
null);
}
throw LOGGER.logExceptionAsError(ex);
}
return toConfigurationSettingWithPagedResponse(pagedResponse);
},
nextLink -> {
int pageETagListSize = (matchConditionsList == null || matchConditionsList.isEmpty()) ? 0 : matchConditionsList.size();
String nextPageETag = null;
int pageETagIndexValue = pageETagIndex.get();
if (pageETagIndexValue < pageETagListSize) {
nextPageETag = matchConditionsList.get(pageETagIndexValue).getIfNoneMatch();
pageETagIndex.set(pageETagIndexValue + 1);
}
PagedResponse<KeyValue> pagedResponse;
try {
pagedResponse = serviceClient.getKeyValuesNextSinglePage(
nextLink,
acceptDateTime,
null,
nextPageETag,
enableSyncRestProxy(addTracingNamespace(context)));
} catch (HttpResponseException ex) {
final HttpResponse httpResponse = ex.getResponse();
if (httpResponse.getStatusCode() == 304) {
String continuationToken = parseNextLink(httpResponse.getHeaderValue("link"));
return new PagedResponseBase<>(
httpResponse.getRequest(),
httpResponse.getStatusCode(),
httpResponse.getHeaders(),
null,
continuationToken,
null);
}
throw LOGGER.logExceptionAsError(ex);
}
return toConfigurationSettingWithPagedResponse(pagedResponse);
}
);
} | String firstPageETag = (matchConditionsList == null || matchConditionsList.isEmpty()) | public PagedIterable<ConfigurationSetting> listConfigurationSettings(SettingSelector selector, Context context) {
final String keyFilter = selector == null ? null : selector.getKeyFilter();
final String labelFilter = selector == null ? null : selector.getLabelFilter();
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
final List<SettingFields> settingFields = selector == null ? null : toSettingFieldsList(selector.getFields());
final List<MatchConditions> matchConditionsList = selector == null ? null : selector.getMatchConditions();
AtomicInteger pageETagIndex = new AtomicInteger(0);
return new PagedIterable<>(
() -> {
PagedResponse<KeyValue> pagedResponse;
try {
pagedResponse = serviceClient.getKeyValuesSinglePage(
keyFilter,
labelFilter,
null,
acceptDateTime,
settingFields,
null,
null,
getPageETag(matchConditionsList, pageETagIndex),
enableSyncRestProxy(addTracingNamespace(context)));
} catch (HttpResponseException ex) {
return handleNotModifiedErrorToValidResponse(ex, LOGGER);
}
return toConfigurationSettingWithPagedResponse(pagedResponse);
},
nextLink -> {
PagedResponse<KeyValue> pagedResponse;
try {
pagedResponse = serviceClient.getKeyValuesNextSinglePage(
nextLink,
acceptDateTime,
null,
getPageETag(matchConditionsList, pageETagIndex),
enableSyncRestProxy(addTracingNamespace(context)));
} catch (HttpResponseException ex) {
return handleNotModifiedErrorToValidResponse(ex, LOGGER);
}
return toConfigurationSettingWithPagedResponse(pagedResponse);
}
);
} | class ConfigurationClient {
private static final ClientLogger LOGGER = new ClientLogger(ConfigurationClient.class);
private final AzureAppConfigurationImpl serviceClient;
private final SyncTokenPolicy syncTokenPolicy;
final CreateSnapshotUtilClient createSnapshotUtilClient;
/**
* Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. Each
* service call goes through the {@code pipeline}.
*
* @param serviceClient The {@link AzureAppConfigurationImpl} that the client routes its request through.
* @param syncTokenPolicy {@link SyncTokenPolicy} to be used to update the external synchronization token to ensure
* service requests receive up-to-date values.
*/
ConfigurationClient(AzureAppConfigurationImpl serviceClient, SyncTokenPolicy syncTokenPolicy) {
this.serviceClient = serviceClient;
this.syncTokenPolicy = syncTokenPolicy;
this.createSnapshotUtilClient = new CreateSnapshotUtilClient(serviceClient);
}
/**
* Gets the service endpoint for the Azure App Configuration instance.
*
* @return the service endpoint for the Azure App Configuration instance.
*/
public String getEndpoint() {
    // The endpoint is owned by the generated service client; expose it unchanged.
    return serviceClient.getEndpoint();
}
/**
* Adds a configuration value in the service if that key does not exist. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
* <pre>
* ConfigurationSetting result = configurationClient
* .addConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
*
* @param key The key of the configuration setting to add.
* @param label The label of the configuration setting to create. If {@code null} no label will be used.
* @param value The value associated with this configuration setting key.
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting addConfigurationSetting(String key, String label, String value) {
    // Build the setting once, then reuse the response-returning overload with the default context.
    ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label).setValue(value);
    return addConfigurationSettingWithResponse(setting, Context.NONE).getValue();
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.addConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw ServiceRequestException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting addConfigurationSetting(ConfigurationSetting setting) {
    // Delegate to the response-returning overload and strip the REST envelope.
    Response<ConfigurationSetting> response = addConfigurationSettingWithResponse(setting, Context.NONE);
    return response.getValue();
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSettingWithResponse
* <pre>
* Response<ConfigurationSetting> responseResultSetting = configurationClient
* .addConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* new Context&
* ConfigurationSetting resultSetting = responseResultSetting.getValue&
* System.out.printf&
* resultSetting.getValue&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSettingWithResponse
*
* @param setting The setting to add based on its key and optional label combination.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response containing the the {@link ConfigurationSetting} that was created, or {@code null}, if a
* key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described
* below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> addConfigurationSettingWithResponse(ConfigurationSetting setting,
    Context context) {
    validateSetting(setting);
    // ETAG_ANY turns the PUT into a conditional create: the request fails when the key/label pair
    // already exists (see the javadoc's ResourceModifiedException contract).
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    final ResponseBase<PutKeyValueHeaders, KeyValue> serviceResponse = serviceClient.putKeyValueWithResponse(
        setting.getKey(), setting.getLabel(), null, ETAG_ANY, toKeyValue(setting), requestContext);
    return toConfigurationSettingWithResponse(serviceResponse);
}
/**
* Creates or updates a configuration value in the service with the given key and. the {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
* <pre>
* ConfigurationSetting result = configurationClient
* .setConfigurationSetting&
* System.out.printf&
*
* &
* result = configurationClient.setConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
*
* @param key The key of the configuration setting to create or update.
* @param label The label of the configuration setting to create or update. If {@code null} no label will be used.
* @param value The value of this configuration setting.
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null} if the key is an invalid
* value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setConfigurationSetting(String key, String label, String value) {
    // Unconditional upsert (ifUnchanged == false) with the default pipeline context.
    ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label).setValue(value);
    return setConfigurationSettingWithResponse(setting, false, Context.NONE).getValue();
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* System.out.printf&
*
* &
* setting = configurationClient.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
*
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null} if the key is an invalid
* value (which will also throw ServiceRequestException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setConfigurationSetting(ConfigurationSetting setting) {
    // Unconditional upsert; unwrap the value from the full REST response.
    Response<ConfigurationSetting> response = setConfigurationSettingWithResponse(setting, false, Context.NONE);
    return response.getValue();
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* setting's ETag matches. If the ETag's value is equal to the wildcard character ({@code "*"}), the setting will
* always be updated.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSettingWithResponse
* <pre>
* &
* Response<ConfigurationSetting> responseSetting = configurationClient.setConfigurationSettingWithResponse&
* new ConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* false,
* new Context&
* ConfigurationSetting initSetting = responseSetting.getValue&
* System.out.printf&
*
* &
* responseSetting = configurationClient.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false,
* new Context&
* ConfigurationSetting updatedSetting = responseSetting.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSettingWithResponse
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
* @param ifUnchanged A boolean indicates if {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response contains the {@link ConfigurationSetting} that was created or updated, or {@code null},
* if the configuration value does not exist or the key is an invalid value (which will also throw
* ServiceRequestException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> setConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged, Context context) {
    validateSetting(setting);
    // When ifUnchanged is true, getETag supplies the setting's ETag so the service only applies the
    // update against an unmodified resource (412 Precondition Failed otherwise, per the javadoc).
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    final ResponseBase<PutKeyValueHeaders, KeyValue> serviceResponse = serviceClient.putKeyValueWithResponse(
        setting.getKey(), setting.getLabel(), getETag(ifUnchanged, setting), null, toKeyValue(setting),
        requestContext);
    return toConfigurationSettingWithResponse(serviceResponse);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, and the optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
* <pre>
* ConfigurationSetting resultNoDateTime = configurationClient.getConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
* not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting getConfigurationSetting(String key, String label) {
    // No point-in-time filter: reads the current state of the setting.
    return getConfigurationSetting(key, label, null);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, the optional {@code label}, and the optional
* {@code acceptDateTime} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
* <pre>
* ConfigurationSetting result =
* configurationClient.getConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to create or update. If {@code null} no label will be used.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
* not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting getConfigurationSetting(String key, String label, OffsetDateTime acceptDateTime) {
    // Wrap the raw key/label in a setting and defer to the response-returning overload.
    ConfigurationSetting lookup = new ConfigurationSetting().setKey(key).setLabel(label);
    return getConfigurationSettingWithResponse(lookup, acceptDateTime, false, Context.NONE).getValue();
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.getConfigurationSetting&
* .setKey&
* .setLabel&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
*
* @param setting The setting to retrieve.
*
* @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
* not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting getConfigurationSetting(ConfigurationSetting setting) {
    // Current state (no acceptDateTime), unconditional (no If-None-Match), default context.
    return getConfigurationSettingWithResponse(setting, null, false, Context.NONE).getValue();
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSettingWithResponse
* <pre>
* &
* Response<ConfigurationSetting> responseResultSetting = configurationClient.getConfigurationSettingWithResponse&
* new ConfigurationSetting&
* .setKey&
* .setLabel&
* null,
* false,
* new Context&
* System.out.printf&
* responseResultSetting.getValue&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSettingWithResponse
*
* @param setting The setting to retrieve.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @param ifChanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* If-None-Match header.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response contains the {@link ConfigurationSetting} stored in the service, or {@code null}, if the
* configuration value does not exist or the key is an invalid value (which will also throw ServiceRequestException
* described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> getConfigurationSettingWithResponse(ConfigurationSetting setting,
    OffsetDateTime acceptDateTime, boolean ifChanged, Context context) {
    validateSetting(setting);
    // Wire format for the point-in-time header; null means "current state".
    final String acceptDateTimeString = acceptDateTime == null ? null : acceptDateTime.toString();
    try {
        return toConfigurationSettingWithResponse(serviceClient.getKeyValueWithResponse(setting.getKey(),
            setting.getLabel(), acceptDateTimeString, null, getETag(ifChanged, setting), null,
            enableSyncRestProxy(addTracingNamespace(context))));
    } catch (HttpResponseException ex) {
        final HttpResponse httpResponse = ex.getResponse();
        // A 304 (Not Modified, from the If-None-Match ETag when ifChanged is set) is not an
        // error for callers: surface it as a response with no setting payload instead of throwing.
        if (httpResponse.getStatusCode() == 304) {
            return new ResponseBase<Void, ConfigurationSetting>(httpResponse.getRequest(),
                httpResponse.getStatusCode(), httpResponse.getHeaders(), null, null);
        }
        throw LOGGER.logExceptionAsError(ex);
    }
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@code key} and optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
* <pre>
* ConfigurationSetting result = configurationClient.deleteConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
*
* @param key The key of configuration setting to delete.
* @param label The label of configuration setting to delete. If {@code null} no label will be used.
* @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the
* {@code key} is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting deleteConfigurationSetting(String key, String label) {
    // Unconditional delete: no ETag check and no per-call pipeline context.
    final ConfigurationSetting settingToDelete = new ConfigurationSetting().setKey(key).setLabel(label);
    return deleteConfigurationSettingWithResponse(settingToDelete, false, Context.NONE).getValue();
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.deleteConfigurationSetting&
* .setKey&
* .setLabel&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
*
* @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the
* {@code key} is an invalid value (which will also throw ServiceRequestException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting deleteConfigurationSetting(ConfigurationSetting setting) {
    // Delegate with ifUnchanged=false so the delete does not require an ETag match.
    final Response<ConfigurationSetting> response =
        deleteConfigurationSettingWithResponse(setting, false, Context.NONE);
    return response.getValue();
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.deleteConfigurationSettingWithResponse
* <pre>
* Response<ConfigurationSetting> responseSetting = configurationClient.deleteConfigurationSettingWithResponse&
* new ConfigurationSetting&
* .setKey&
* .setLabel&
* false,
* new Context&
* System.out.printf&
* "Key: %s, Value: %s", responseSetting.getValue&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.deleteConfigurationSettingWithResponse
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
* is also returned if the {@link ConfigurationSetting
* {@link ConfigurationSetting
* (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> deleteConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged, Context context) {
    validateSetting(setting);
    // When ifUnchanged is true, getETag supplies the setting's ETag so the service only
    // deletes if nobody else has modified the setting in the meantime.
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    return toConfigurationSettingWithResponse(serviceClient.deleteKeyValueWithResponse(setting.getKey(),
        setting.getLabel(), getETag(ifUnchanged, setting), requestContext));
}
/**
* Sets the read-only status for the {@link ConfigurationSetting} that matches the {@code key}, the optional
* {@code label}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting result = configurationClient.setReadOnly&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting result = configurationClient.setReadOnly&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* @param key The key of configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param label The label of configuration setting to set to read-only or not read-only based on the
* {@code isReadOnly} value, or optionally. If {@code null} no label will be used.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return The {@link ConfigurationSetting} that is read-only, or {@code null} is also returned if a key collision
* occurs or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setReadOnly(String key, String label, boolean isReadOnly) {
    // Convenience overload: wrap the key/label pair and delegate without a per-call context.
    final ConfigurationSetting target = new ConfigurationSetting().setKey(key).setLabel(label);
    return setReadOnlyWithResponse(target, isReadOnly, Context.NONE).getValue();
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting setting = configurationClient.setReadOnly&
* .setKey&
* .setLabel&
* true&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting setting = configurationClient.setReadOnly&
* .setKey&
* .setLabel&
* false&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
*
* @return The {@link ConfigurationSetting} that is read-only, or {@code null} is also returned if a key collision
* occurs or the key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setReadOnly(ConfigurationSetting setting, boolean isReadOnly) {
    // Delegate to the Response-returning variant and unwrap the payload.
    final Response<ConfigurationSetting> response = setReadOnlyWithResponse(setting, isReadOnly, Context.NONE);
    return response.getValue();
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
* <pre>
* ConfigurationSetting resultSetting = configurationClient.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* true,
* Context.NONE&
* .getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
* <pre>
* Response<ConfigurationSetting> responseSetting = configurationClient
* .setConfigurationSettingWithResponse&
* new ConfigurationSetting&
* new Context&
* System.out.printf&
* responseSetting.getValue&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response containing the read-only or not read-only ConfigurationSetting if {@code isReadOnly}
* is true or null, or false respectively. Or return {@code null} if the setting didn't exist.
* {@code null} is also returned if the {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> setReadOnlyWithResponse(ConfigurationSetting setting, boolean isReadOnly,
    Context context) {
    validateSetting(setting);
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    // Putting a lock marks the setting read-only; deleting the lock clears that state.
    if (isReadOnly) {
        return toConfigurationSettingWithResponse(
            serviceClient.putLockWithResponse(setting.getKey(), setting.getLabel(), null, null, requestContext));
    }
    return toConfigurationSettingWithResponse(
        serviceClient.deleteLockWithResponse(setting.getKey(), setting.getLabel(), null, null, requestContext));
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* configurationClient.listConfigurationSettings&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the List contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listConfigurationSettings(SettingSelector selector) {
    // Convenience overload: no per-call pipeline context.
    return listConfigurationSettings(selector, Context.NONE);
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* Context ctx = new Context&
* configurationClient.listConfigurationSettings&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the {@link PagedIterable} contains all the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their current
* values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshot -->
* <pre>
* String snapshotName = "&
* configurationClient.listConfigurationSettingsForSnapshot&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshot -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the List contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName) {
    // Convenience overload: default field projection (null) and no per-call context.
    return listConfigurationSettingsForSnapshot(snapshotName, null, Context.NONE);
}
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their current
* values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* List<SettingFields> fields = Arrays.asList&
* Context ctx = new Context&
* configurationClient.listConfigurationSettingsForSnapshot&
* .forEach&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshotMaxOverload -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @param fields Optional. The fields to select for the query response. If none are set, the service will return the
* ConfigurationSettings with a default set of properties.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the {@link PagedIterable} contains all the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName,
    List<SettingFields> fields,
    Context context) {
    return new PagedIterable<>(
        // First page: filter server-side by snapshot name, projecting only the requested fields.
        () -> toConfigurationSettingWithPagedResponse(
            serviceClient.getKeyValuesSinglePage(null, null, null, null, fields, snapshotName, null, null,
                enableSyncRestProxy(addTracingNamespace(context)))),
        // Subsequent pages follow the service-provided continuation link.
        nextLink -> toConfigurationSettingWithPagedResponse(
            serviceClient.getKeyValuesNextSinglePage(nextLink, null, null, null,
                enableSyncRestProxy(addTracingNamespace(context)))));
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
 * Revisions expire after a period of time, see
 * <a href="https://azure.microsoft.com/pricing/details/app-configuration/">Pricing</a> for more information.
*
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listRevisions
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* configurationClient.listRevisions&
* System.out.printf&
* resp.getRequest&
* resp.getItems&
* System.out.printf&
* &
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listRevisions
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @return {@link PagedIterable} of {@link ConfigurationSetting} revisions.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listRevisions(SettingSelector selector) {
    // Convenience overload: no per-call pipeline context.
    return listRevisions(selector, Context.NONE);
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
 * Revisions expire after a period of time, see
 * <a href="https://azure.microsoft.com/pricing/details/app-configuration/">Pricing</a> for more information.
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listRevisions
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* Context ctx = new Context&
* configurationClient.listRevisions&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listRevisions
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link PagedIterable} of {@link ConfigurationSetting} revisions.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listRevisions(SettingSelector selector, Context context) {
    // Extract the filters from the (possibly null) selector once; they are reused on every page fetch.
    final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
    final String keyFilter = selector == null ? null : selector.getKeyFilter();
    final String labelFilter = selector == null ? null : selector.getLabelFilter();
    return new PagedIterable<>(
        // First page: apply key/label/field filters and the optional point-in-time datetime.
        () -> toConfigurationSettingWithPagedResponse(serviceClient.getRevisionsSinglePage(keyFilter,
            labelFilter, null, acceptDateTime,
            selector == null ? null : toSettingFieldsList(selector.getFields()),
            enableSyncRestProxy(addTracingNamespace(context)))),
        // Subsequent pages follow the service-provided continuation link.
        nextLink -> toConfigurationSettingWithPagedResponse(serviceClient.getRevisionsNextSinglePage(nextLink,
            acceptDateTime, enableSyncRestProxy(addTracingNamespace(context)))));
}
/**
* Create a {@link ConfigurationSnapshot} by providing a snapshot name and a
* {@link ConfigurationSnapshot}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.beginCreateSnapshotMaxOverload -->
* <pre>
* List<ConfigurationSettingsFilter> filters = new ArrayList<>&
* &
* filters.add&
* String snapshotName = "&
* Context ctx = new Context&
*
* SyncPoller<PollOperationDetails, ConfigurationSnapshot> poller =
* client.beginCreateSnapshot&
* new ConfigurationSnapshot&
* poller.setPollInterval&
* poller.waitForCompletion&
* ConfigurationSnapshot snapshot = poller.getFinalResult&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.beginCreateSnapshotMaxOverload -->
*
* @param snapshotName The name of the {@link ConfigurationSnapshot} to create.
* @param snapshot The snapshot to create.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link SyncPoller} that polls the creating snapshot operation until it has completed or
* has failed. The completed operation returns a {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public SyncPoller<PollOperationDetails, ConfigurationSnapshot> beginCreateSnapshot(
    String snapshotName, ConfigurationSnapshot snapshot, Context context) {
    // The long-running create operation is driven entirely by the shared snapshot utility client.
    final SyncPoller<PollOperationDetails, ConfigurationSnapshot> poller =
        createSnapshotUtilClient.beginCreateSnapshot(snapshotName, snapshot, context);
    return poller;
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.getSnapshotByName -->
* <pre>
* String snapshotName = "&
* ConfigurationSnapshot getSnapshot = client.getSnapshot&
* System.out.printf&
* getSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.getSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSnapshot getSnapshot(String snapshotName) {
    // Convenience overload: full field set (null projection) and no per-call context.
    final Response<ConfigurationSnapshot> response = getSnapshotWithResponse(snapshotName, null, Context.NONE);
    return response.getValue();
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.getSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
* Context ctx = new Context&
* ConfigurationSnapshot getSnapshot = client.getSnapshotWithResponse&
* snapshotName,
* Arrays.asList&
* ctx&
* .getValue&
* &
* &
* System.out.printf&
* getSnapshot.getName&
* List<ConfigurationSettingsFilter> filters = getSnapshot.getFilters&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.getSnapshotByNameMaxOverload -->
*
* @param snapshotName the snapshot name.
* @param fields Used to select what fields are present in the returned resource(s).
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSnapshot> getSnapshotWithResponse(String snapshotName, List<SnapshotFields> fields,
    Context context) {
    // NOTE(review): unlike the key-value operations above, the context is forwarded without the
    // addTracingNamespace/enableSyncRestProxy wrapping — confirm this is intentional.
    final ResponseBase<GetSnapshotHeaders, ConfigurationSnapshot> serviceResponse =
        serviceClient.getSnapshotWithResponse(snapshotName, null, null, fields, context);
    // Re-wrap to strip the strongly-typed headers from the public return type.
    return new SimpleResponse<>(serviceResponse, serviceResponse.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.archiveSnapshotByName -->
* <pre>
* String snapshotName = "&
* ConfigurationSnapshot archivedSnapshot = client.archiveSnapshot&
* System.out.printf&
* archivedSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.archiveSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSnapshot archiveSnapshot(String snapshotName) {
    // Unconditional archive: no match conditions, so the current ETag is ignored.
    final Response<ConfigurationSnapshot> response = updateSnapshotSync(snapshotName, null,
        ConfigurationSnapshotStatus.ARCHIVED, serviceClient, Context.NONE);
    return response.getValue();
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.archiveSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* Context ctx = new Context&
*
* ConfigurationSnapshot archivedSnapshot = client.archiveSnapshotWithResponse&
* .getValue&
* System.out.printf&
* archivedSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.archiveSnapshotByNameMaxOverload -->
*
* @param snapshotName the snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSnapshot> archiveSnapshotWithResponse(String snapshotName,
    MatchConditions matchConditions, Context context) {
    // matchConditions supplies the ETag for the conditional (If-Match) archive request.
    return updateSnapshotSync(
        snapshotName, matchConditions, ConfigurationSnapshotStatus.ARCHIVED, serviceClient, context);
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.recoverSnapshotByName -->
* <pre>
* String snapshotName = "&
* ConfigurationSnapshot recoveredSnapshot = client.recoverSnapshot&
* System.out.printf&
* recoveredSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.recoverSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSnapshot recoverSnapshot(String snapshotName) {
    // Unconditional recover back to READY: no match conditions, so the current ETag is ignored.
    final Response<ConfigurationSnapshot> response = updateSnapshotSync(snapshotName, null,
        ConfigurationSnapshotStatus.READY, serviceClient, Context.NONE);
    return response.getValue();
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.recoverSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* Context ctx = new Context&
*
* ConfigurationSnapshot recoveredSnapshot = client.recoverSnapshotWithResponse&
* .getValue&
* System.out.printf&
* recoveredSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.recoverSnapshotMaxOverload -->
*
* @param snapshotName the snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSnapshot> recoverSnapshotWithResponse(String snapshotName,
    MatchConditions matchConditions, Context context) {
    // Recovery is an update of the snapshot's status to READY, honoring any caller match conditions.
    final ConfigurationSnapshotStatus targetStatus = ConfigurationSnapshotStatus.READY;
    return updateSnapshotSync(snapshotName, matchConditions, targetStatus, serviceClient, context);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.listSnapshots -->
* <pre>
* String snapshotNameFilter = "&
* client.listSnapshots&
* .forEach&
* System.out.printf&
* snapshotResult.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.listSnapshots -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @return A {@link PagedIterable} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector) {
    // Delegate to the context-aware overload with an empty context.
    final Context noContext = Context.NONE;
    return listSnapshots(selector, noContext);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.listSnapshotsMaxOverload -->
* <pre>
* String snapshotNameFilter = "&
* Context ctx = new Context&
*
* client.listSnapshots&
* .forEach&
* System.out.printf&
* snapshotResult.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.listSnapshotsMaxOverload -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link PagedIterable} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector, Context context) {
    return new PagedIterable<>(
        () -> {
            // The selector is optional; a null selector means no filtering at all. Guarding once here
            // avoids repeating the null check for each individual filter field.
            final Context pageContext = enableSyncRestProxy(addTracingNamespace(context));
            if (selector == null) {
                return serviceClient.getSnapshotsSinglePage(null, null, null, null, pageContext);
            }
            return serviceClient.getSnapshotsSinglePage(selector.getNameFilter(), null, selector.getFields(),
                selector.getStatus(), pageContext);
        },
        nextLink -> serviceClient.getSnapshotsNextSinglePage(nextLink,
            enableSyncRestProxy(addTracingNamespace(context))));
}
/**
 * Adds an external synchronization token to ensure service requests receive up-to-date values.
 *
 * @param token an external synchronization token to ensure service requests receive up-to-date values.
 * @throws NullPointerException if the given token is null.
 */
public void updateSyncToken(String token) {
    // Enforce the documented null contract eagerly so callers get a clear failure at the entry point
    // instead of relying on the policy's internal handling.
    java.util.Objects.requireNonNull(token, "'token' cannot be null.");
    syncTokenPolicy.updateSyncToken(token);
}
} | class ConfigurationClient {
// Logger used to surface mapped service exceptions (see getConfigurationSettingWithResponse).
private static final ClientLogger LOGGER = new ClientLogger(ConfigurationClient.class);
// Generated service layer that every request is routed through.
private final AzureAppConfigurationImpl serviceClient;
// Policy holding the external synchronization token; refreshed via updateSyncToken(String).
private final SyncTokenPolicy syncTokenPolicy;
// Helper for snapshot-creation operations, built over the same service client in the constructor.
final CreateSnapshotUtilClient createSnapshotUtilClient;
/**
* Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. Each
* service call goes through the {@code pipeline}.
*
* @param serviceClient The {@link AzureAppConfigurationImpl} that the client routes its request through.
* @param syncTokenPolicy {@link SyncTokenPolicy} to be used to update the external synchronization token to ensure
* service requests receive up-to-date values.
*/
ConfigurationClient(AzureAppConfigurationImpl serviceClient, SyncTokenPolicy syncTokenPolicy) {
    // Keep handles to the generated service layer and the sync-token policy, and build the
    // snapshot-creation helper on top of the same service client.
    this.syncTokenPolicy = syncTokenPolicy;
    this.serviceClient = serviceClient;
    this.createSnapshotUtilClient = new CreateSnapshotUtilClient(serviceClient);
}
/**
* Gets the service endpoint for the Azure App Configuration instance.
*
* @return the service endpoint for the Azure App Configuration instance.
*/
public String getEndpoint() {
    // The endpoint is owned by the generated service client; expose it unchanged.
    final String endpoint = serviceClient.getEndpoint();
    return endpoint;
}
/**
* Adds a configuration value in the service if that key does not exist. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
* <pre>
* ConfigurationSetting result = configurationClient
* .addConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
*
* @param key The key of the configuration setting to add.
* @param label The label of the configuration setting to create. If {@code null} no label will be used.
* @param value The value associated with this configuration setting key.
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting addConfigurationSetting(String key, String label, String value) {
    // Assemble the setting from its pieces, then delegate to the response-returning overload.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label).setValue(value);
    return addConfigurationSettingWithResponse(setting, Context.NONE).getValue();
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS" and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.addConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSetting
*
* @param setting The setting to add based on its key and optional label combination.
*
* @return The {@link ConfigurationSetting} that was created, or {@code null} if a key collision occurs or the key
* is an invalid value (which will also throw ServiceRequestException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting addConfigurationSetting(ConfigurationSetting setting) {
    // Delegate to the response-returning overload and unwrap the created setting.
    final Response<ConfigurationSetting> response = addConfigurationSettingWithResponse(setting, Context.NONE);
    return response.getValue();
}
/**
* Adds a configuration value in the service if that key and label does not exist. The label value of the
* ConfigurationSetting is optional.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSettingWithResponse
* <pre>
* Response<ConfigurationSetting> responseResultSetting = configurationClient
* .addConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* new Context&
* ConfigurationSetting resultSetting = responseResultSetting.getValue&
* System.out.printf&
* resultSetting.getValue&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.addConfigurationSettingWithResponse
*
* @param setting The setting to add based on its key and optional label combination.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response containing the {@link ConfigurationSetting} that was created, or {@code null}, if a
* key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described
* below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> addConfigurationSettingWithResponse(ConfigurationSetting setting,
    Context context) {
    validateSetting(setting);
    // ETAG_ANY in the if-none-match position makes this add-only: the service rejects the call when a
    // setting with the same key/label already exists (surfaced as ResourceModifiedException).
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    final ResponseBase<PutKeyValueHeaders, KeyValue> serviceResponse = serviceClient.putKeyValueWithResponse(
        setting.getKey(), setting.getLabel(), null, ETAG_ANY, toKeyValue(setting), requestContext);
    return toConfigurationSettingWithResponse(serviceResponse);
}
/**
* Creates or updates a configuration value in the service with the given key. The {@code label} is optional.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection", "westUS" and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
* <pre>
* ConfigurationSetting result = configurationClient
* .setConfigurationSetting&
* System.out.printf&
*
* &
* result = configurationClient.setConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
*
* @param key The key of the configuration setting to create or update.
* @param label The label of the configuration setting to create or update. If {@code null} no label will be used.
* @param value The value of this configuration setting.
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null} if the key is an invalid
* value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If the setting exists and is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setConfigurationSetting(String key, String label, String value) {
    // Build the setting, then delegate to the full overload with no ETag enforcement.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label).setValue(value);
    return setConfigurationSettingWithResponse(setting, false, Context.NONE).getValue();
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* System.out.printf&
*
* &
* setting = configurationClient.setConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSetting
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
*
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null} if the key is an invalid
* value (which will also throw ServiceRequestException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setConfigurationSetting(ConfigurationSetting setting) {
    // Unconditional upsert: ifUnchanged=false skips the if-match precondition.
    final Response<ConfigurationSetting> response =
        setConfigurationSettingWithResponse(setting, false, Context.NONE);
    return response.getValue();
}
/**
* Creates or updates a configuration value in the service. Partial updates are not supported and the entire
* configuration setting is updated.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* setting's ETag matches. If the ETag's value is equal to the wildcard character ({@code "*"}), the setting will
* always be updated.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
* <p>Update setting's value "db_connection" to "updated_db_connection"</p>
*
* <!-- src_embed com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSettingWithResponse
* <pre>
* &
* Response<ConfigurationSetting> responseSetting = configurationClient.setConfigurationSettingWithResponse&
* new ConfigurationSetting&
* .setKey&
* .setLabel&
* .setValue&
* false,
* new Context&
* ConfigurationSetting initSetting = responseSetting.getValue&
* System.out.printf&
*
* &
* responseSetting = configurationClient.setConfigurationSettingWithResponse&
* .setKey&
* .setLabel&
* .setValue&
* false,
* new Context&
* ConfigurationSetting updatedSetting = responseSetting.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.ConfigurationClient.setConfigurationSettingWithResponse
*
* @param setting The setting to create or update based on its key, optional label and optional ETag combination.
* @param ifUnchanged A boolean indicates if {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response contains the {@link ConfigurationSetting} that was created or updated, or {@code null},
* if the configuration value does not exist or the key is an invalid value (which will also throw
* ServiceRequestException described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceModifiedException If the {@link ConfigurationSetting
* wildcard character, and the current configuration value's ETag does not match, or the setting exists and is
* read-only.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> setConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged, Context context) {
    validateSetting(setting);
    // When ifUnchanged is true, getETag supplies the setting's ETag as an if-match precondition, so the
    // update only succeeds while the service-side value is unmodified.
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    final ResponseBase<PutKeyValueHeaders, KeyValue> serviceResponse = serviceClient.putKeyValueWithResponse(
        setting.getKey(), setting.getLabel(), getETag(ifUnchanged, setting), null, toKeyValue(setting),
        requestContext);
    return toConfigurationSettingWithResponse(serviceResponse);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, and the optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
* <pre>
* ConfigurationSetting resultNoDateTime = configurationClient.getConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to retrieve. If {@code null} no label will be used.
* @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
* not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting getConfigurationSetting(String key, String label) {
    // No acceptDateTime supplied: fetch the current state of the setting.
    final OffsetDateTime noDateTime = null;
    return getConfigurationSetting(key, label, noDateTime);
}
/**
* Attempts to get a ConfigurationSetting that matches the {@code key}, the optional {@code label}, and the optional
* {@code acceptDateTime} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
* <pre>
* ConfigurationSetting result =
* configurationClient.getConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
*
* @param key The key of the setting to retrieve.
* @param label The label of the configuration setting to create or update. If {@code null} no label will be used.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
* not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting getConfigurationSetting(String key, String label, OffsetDateTime acceptDateTime) {
    // Wrap the key/label into a lookup setting and unwrap the REST response for the caller.
    final ConfigurationSetting lookup = new ConfigurationSetting().setKey(key).setLabel(label);
    return getConfigurationSettingWithResponse(lookup, acceptDateTime, false, Context.NONE).getValue();
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.getConfigurationSetting&
* .setKey&
* .setLabel&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSetting
*
* @param setting The setting to retrieve.
*
* @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
* not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
*
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting getConfigurationSetting(ConfigurationSetting setting) {
    // Current-state read: no acceptDateTime and no if-none-match precondition.
    final Response<ConfigurationSetting> response =
        getConfigurationSettingWithResponse(setting, null, false, Context.NONE);
    return response.getValue();
}
/**
* Attempts to get the ConfigurationSetting with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.getConfigurationSettingWithResponse
* <pre>
* &
* Response<ConfigurationSetting> responseResultSetting = configurationClient.getConfigurationSettingWithResponse&
* new ConfigurationSetting&
* .setKey&
* .setLabel&
* null,
* false,
* new Context&
* System.out.printf&
* responseResultSetting.getValue&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.getConfigurationSettingWithResponse
*
* @param setting The setting to retrieve.
* @param acceptDateTime Datetime to access a past state of the configuration setting. If {@code null}
* then the current state of the configuration setting will be returned.
* @param ifChanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* If-None-Match header.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response contains the {@link ConfigurationSetting} stored in the service, or {@code null}, if the
* configuration value does not exist or the key is an invalid value (which will also throw ServiceRequestException
* described below).
* @throws NullPointerException If {@code setting} is {@code null}.
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
* @throws HttpResponseException If the {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> getConfigurationSettingWithResponse(ConfigurationSetting setting,
    OffsetDateTime acceptDateTime, boolean ifChanged, Context context) {
    validateSetting(setting);
    try {
        final String acceptDateTimeText = acceptDateTime == null ? null : acceptDateTime.toString();
        final ResponseBase<GetKeyValueHeaders, KeyValue> serviceResponse = serviceClient.getKeyValueWithResponse(
            setting.getKey(), setting.getLabel(), acceptDateTimeText, null, getETag(ifChanged, setting), null,
            enableSyncRestProxy(addTracingNamespace(context)));
        return toConfigurationSettingWithResponse(serviceResponse);
    } catch (HttpResponseException ex) {
        final HttpResponse httpResponse = ex.getResponse();
        if (httpResponse.getStatusCode() != 304) {
            throw LOGGER.logExceptionAsError(ex);
        }
        // 304 Not Modified (the conditional ETag matched): return the raw response with a null body
        // rather than treating it as an error.
        return new ResponseBase<Void, ConfigurationSetting>(httpResponse.getRequest(),
            httpResponse.getStatusCode(), httpResponse.getHeaders(), null, null);
    }
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@code key} and optional {@code label} combination.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
* <pre>
* ConfigurationSetting result = configurationClient.deleteConfigurationSetting&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
*
* @param key The key of configuration setting to delete.
* @param label The label of configuration setting to delete. If {@code null} no label will be used.
* @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the
* {@code key} is an invalid value (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting deleteConfigurationSetting(String key, String label) {
    // Unconditional delete: no if-match precondition is applied.
    final ConfigurationSetting toDelete = new ConfigurationSetting().setKey(key).setLabel(label);
    return deleteConfigurationSettingWithResponse(toDelete, false, Context.NONE).getValue();
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
* <pre>
* ConfigurationSetting setting = configurationClient.deleteConfigurationSetting&
* .setKey&
* .setLabel&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.deleteConfigurationSetting
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
*
* @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the
* {@code key} is an invalid value (which will also throw ServiceRequestException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting deleteConfigurationSetting(ConfigurationSetting setting) {
    // Delegate to the response-returning overload and unwrap the deleted setting.
    final Response<ConfigurationSetting> response =
        deleteConfigurationSettingWithResponse(setting, false, Context.NONE);
    return response.getValue();
}
/**
* Deletes the {@link ConfigurationSetting} with a matching {@link ConfigurationSetting
* {@link ConfigurationSetting
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* If {@link ConfigurationSetting
* the setting is <b>only</b> deleted if the ETag matches the current ETag; this means that no one has updated the
* ConfigurationSetting yet.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the setting with the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.deleteConfigurationSettingWithResponse
* <pre>
* Response<ConfigurationSetting> responseSetting = configurationClient.deleteConfigurationSettingWithResponse&
* new ConfigurationSetting&
* .setKey&
* .setLabel&
* false,
* new Context&
* System.out.printf&
* "Key: %s, Value: %s", responseSetting.getValue&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.deleteConfigurationSettingWithResponse
*
* @param setting The setting to delete based on its key, optional label and optional ETag combination.
* @param ifUnchanged Flag indicating if the {@code setting} {@link ConfigurationSetting
* IF-MATCH header.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
* is also returned if the {@link ConfigurationSetting
* {@link ConfigurationSetting
* (which will also throw ServiceRequestException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws NullPointerException When {@code setting} is {@code null}.
* @throws ResourceModifiedException If {@code setting} is read-only.
* @throws ResourceNotFoundException If {@link ConfigurationSetting
* character, and does not match the current ETag value.
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> deleteConfigurationSettingWithResponse(ConfigurationSetting setting,
    boolean ifUnchanged, Context context) {
    validateSetting(setting);
    // With ifUnchanged set, the setting's ETag is sent as an if-match precondition so the delete only
    // applies when the service-side value has not changed since it was read.
    final Context requestContext = enableSyncRestProxy(addTracingNamespace(context));
    final ResponseBase<DeleteKeyValueHeaders, KeyValue> serviceResponse = serviceClient.deleteKeyValueWithResponse(
        setting.getKey(), setting.getLabel(), getETag(ifUnchanged, setting), requestContext);
    return toConfigurationSettingWithResponse(serviceResponse);
}
/**
* Sets the read-only status for the {@link ConfigurationSetting} that matches the {@code key}, the optional
* {@code label}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting result = configurationClient.setReadOnly&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting result = configurationClient.setReadOnly&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* @param key The key of configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param label The label of configuration setting to set to read-only or not read-only based on the
* {@code isReadOnly} value, or optionally. If {@code null} no label will be used.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @return The {@link ConfigurationSetting} that is read-only, or {@code null} is also returned if a key collision
* occurs or the key is an invalid value (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@code key} is {@code null}.
* @throws HttpResponseException If {@code key} is an empty string.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setReadOnly(String key, String label, boolean isReadOnly) {
    // Wrap the key/label and delegate to the response-returning overload.
    final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setLabel(label);
    return setReadOnlyWithResponse(setting, isReadOnly, Context.NONE).getValue();
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting setting = configurationClient.setReadOnly&
* .setKey&
* .setLabel&
* true&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnly
* <pre>
* ConfigurationSetting setting = configurationClient.setReadOnly&
* .setKey&
* .setLabel&
* false&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnly
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
*
* @return The {@link ConfigurationSetting} that is read-only, or {@code null} is also returned if a key collision
* occurs or the key is an invalid value (which will also throw HttpResponseException described below).
*
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSetting setReadOnly(ConfigurationSetting setting, boolean isReadOnly) {
    // Delegate to the response-returning overload and unwrap the updated setting.
    final Response<ConfigurationSetting> response = setReadOnlyWithResponse(setting, isReadOnly, Context.NONE);
    return response.getValue();
}
/**
* Sets the read-only status for the {@link ConfigurationSetting}.
*
* For more configuration setting types, see {@link FeatureFlagConfigurationSetting} and
* {@link SecretReferenceConfigurationSetting}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the setting to read-only with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
* <pre>
* ConfigurationSetting resultSetting = configurationClient.setReadOnlyWithResponse&
* .setKey&
* .setLabel&
* true,
* Context.NONE&
* .getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
*
* <p>Clear read-only of the setting with the key-label "prodDBConnection"-"westUS".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
* <pre>
* Response<ConfigurationSetting> responseSetting = configurationClient
* .setConfigurationSettingWithResponse&
* new ConfigurationSetting&
* new Context&
* System.out.printf&
* responseSetting.getValue&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.setReadOnlyWithResponse
*
* @param setting The configuration setting to set to read-only or not read-only based on the {@code isReadOnly}.
* @param isReadOnly Flag used to set the read-only status of the configuration. {@code true} will put the
* configuration into a read-only state, {@code false} will clear the state.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A REST response containing the read-only or not read-only ConfigurationSetting if {@code isReadOnly}
* is true or null, or false respectively. Or return {@code null} if the setting didn't exist.
* {@code null} is also returned if the {@link ConfigurationSetting
* (which will also throw HttpResponseException described below).
* @throws IllegalArgumentException If {@link ConfigurationSetting
* @throws HttpResponseException If {@link ConfigurationSetting
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSetting> setReadOnlyWithResponse(ConfigurationSetting setting, boolean isReadOnly,
Context context) {
validateSetting(setting);
final String key = setting.getKey();
final String label = setting.getLabel();
context = enableSyncRestProxy(addTracingNamespace(context));
return isReadOnly
? toConfigurationSettingWithResponse(serviceClient.putLockWithResponse(key, label, null, null, context))
: toConfigurationSettingWithResponse(serviceClient.deleteLockWithResponse(key, label, null, null, context));
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* configurationClient.listConfigurationSettings&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the List contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listConfigurationSettings(SettingSelector selector) {
return listConfigurationSettings(selector, Context.NONE);
}
/**
* Fetches the configuration settings that match the {@code selector}. If {@code selector} is {@code null}, then all
* the {@link ConfigurationSetting configuration settings} are fetched with their current values.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all settings that use the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* Context ctx = new Context&
* configurationClient.listConfigurationSettings&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettings
*
* @param selector Optional. Selector to filter configuration setting results from the service.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the {@link PagedIterable} contains all the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their current
* values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshot -->
* <pre>
* String snapshotName = "&
* configurationClient.listConfigurationSettingsForSnapshot&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshot -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the List contains all of the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName) {
return listConfigurationSettingsForSnapshot(snapshotName, null, Context.NONE);
}
/**
* Fetches the configuration settings in a snapshot that matches the {@code snapshotName}. If {@code snapshotName}
* is {@code null}, then all the {@link ConfigurationSetting configuration settings} are fetched with their current
* values.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* List<SettingFields> fields = Arrays.asList&
* Context ctx = new Context&
* configurationClient.listConfigurationSettingsForSnapshot&
* .forEach&
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listConfigurationSettingsForSnapshotMaxOverload -->
*
* @param snapshotName Optional. A filter used get {@link ConfigurationSetting}s for a snapshot. The value should
* be the name of the snapshot.
* @param fields Optional. The fields to select for the query response. If none are set, the service will return the
* ConfigurationSettings with a default set of properties.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code selector}. If no options were
* provided, the {@link PagedIterable} contains all the current settings in the service.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listConfigurationSettingsForSnapshot(String snapshotName,
List<SettingFields> fields,
Context context) {
return new PagedIterable<>(
() -> {
final PagedResponse<KeyValue> pagedResponse = serviceClient.getKeyValuesSinglePage(
null,
null,
null,
null,
fields,
snapshotName,
null,
null,
enableSyncRestProxy(addTracingNamespace(context)));
return toConfigurationSettingWithPagedResponse(pagedResponse);
},
nextLink -> {
final PagedResponse<KeyValue> pagedResponse = serviceClient.getKeyValuesNextSinglePage(nextLink,
null, null, null, enableSyncRestProxy(addTracingNamespace(context)));
return toConfigurationSettingWithPagedResponse(pagedResponse);
}
);
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
* Revisions expire after a period of time, see <a href="https:
* for more information.
*
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listRevisions
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* configurationClient.listRevisions&
* System.out.printf&
* resp.getRequest&
* resp.getItems&
* System.out.printf&
* &
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listRevisions
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @return {@link PagedIterable} of {@link ConfigurationSetting} revisions.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listRevisions(SettingSelector selector) {
return listRevisions(selector, Context.NONE);
}
/**
* Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
* in descending order from their {@link ConfigurationSetting
* Revisions expire after a period of time, see <a href="https:
* for more information.
*
* If {@code selector} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
* in their current state. Otherwise, the results returned match the parameters given in {@code selector}.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
*
* <!-- src_embed com.azure.data.applicationconfig.configurationclient.listRevisions
* <pre>
* SettingSelector settingSelector = new SettingSelector&
* Context ctx = new Context&
* configurationClient.listRevisions&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.applicationconfig.configurationclient.listRevisions
*
* @param selector Optional. Used to filter configuration setting revisions from the service.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link PagedIterable} of {@link ConfigurationSetting} revisions.
* @throws HttpResponseException If a client or service error occurs, such as a 404, 409, 429 or 500.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSetting> listRevisions(SettingSelector selector, Context context) {
final String acceptDateTime = selector == null ? null : selector.getAcceptDateTime();
return new PagedIterable<>(
() -> {
final PagedResponse<KeyValue> pagedResponse = serviceClient.getRevisionsSinglePage(
selector == null ? null : selector.getKeyFilter(),
selector == null ? null : selector.getLabelFilter(),
null,
acceptDateTime,
selector == null ? null : toSettingFieldsList(selector.getFields()),
enableSyncRestProxy(addTracingNamespace(context)));
return toConfigurationSettingWithPagedResponse(pagedResponse);
},
nextLink -> {
final PagedResponse<KeyValue> pagedResponse = serviceClient.getRevisionsNextSinglePage(nextLink,
acceptDateTime, enableSyncRestProxy(addTracingNamespace(context)));
return toConfigurationSettingWithPagedResponse(pagedResponse);
}
);
}
/**
* Create a {@link ConfigurationSnapshot} by providing a snapshot name and a
* {@link ConfigurationSnapshot}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.beginCreateSnapshotMaxOverload -->
* <pre>
* List<ConfigurationSettingsFilter> filters = new ArrayList<>&
* &
* filters.add&
* String snapshotName = "&
* Context ctx = new Context&
*
* SyncPoller<PollOperationDetails, ConfigurationSnapshot> poller =
* client.beginCreateSnapshot&
* new ConfigurationSnapshot&
* poller.setPollInterval&
* poller.waitForCompletion&
* ConfigurationSnapshot snapshot = poller.getFinalResult&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.beginCreateSnapshotMaxOverload -->
*
* @param snapshotName The name of the {@link ConfigurationSnapshot} to create.
* @param snapshot The snapshot to create.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link SyncPoller} that polls the creating snapshot operation until it has completed or
* has failed. The completed operation returns a {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public SyncPoller<PollOperationDetails, ConfigurationSnapshot> beginCreateSnapshot(
String snapshotName, ConfigurationSnapshot snapshot, Context context) {
return createSnapshotUtilClient.beginCreateSnapshot(snapshotName, snapshot, context);
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.getSnapshotByName -->
* <pre>
* String snapshotName = "&
* ConfigurationSnapshot getSnapshot = client.getSnapshot&
* System.out.printf&
* getSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.getSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSnapshot getSnapshot(String snapshotName) {
return getSnapshotWithResponse(snapshotName, null, Context.NONE).getValue();
}
/**
* Get a {@link ConfigurationSnapshot} by given the snapshot name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.getSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
* Context ctx = new Context&
* ConfigurationSnapshot getSnapshot = client.getSnapshotWithResponse&
* snapshotName,
* Arrays.asList&
* ctx&
* .getValue&
* &
* &
* System.out.printf&
* getSnapshot.getName&
* List<ConfigurationSettingsFilter> filters = getSnapshot.getFilters&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.getSnapshotByNameMaxOverload -->
*
* @param snapshotName the snapshot name.
* @param fields Used to select what fields are present in the returned resource(s).
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSnapshot> getSnapshotWithResponse(String snapshotName, List<SnapshotFields> fields,
Context context) {
final ResponseBase<GetSnapshotHeaders, ConfigurationSnapshot> response =
serviceClient.getSnapshotWithResponse(snapshotName, null, null, fields, context);
return new SimpleResponse<>(response, response.getValue());
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.archiveSnapshotByName -->
* <pre>
* String snapshotName = "&
* ConfigurationSnapshot archivedSnapshot = client.archiveSnapshot&
* System.out.printf&
* archivedSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.archiveSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSnapshot archiveSnapshot(String snapshotName) {
return updateSnapshotSync(snapshotName, null, ConfigurationSnapshotStatus.ARCHIVED, serviceClient,
Context.NONE).getValue();
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.archiveSnapshotByNameMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* Context ctx = new Context&
*
* ConfigurationSnapshot archivedSnapshot = client.archiveSnapshotWithResponse&
* .getValue&
* System.out.printf&
* archivedSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.archiveSnapshotByNameMaxOverload -->
*
* @param snapshotName the snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSnapshot> archiveSnapshotWithResponse(String snapshotName,
MatchConditions matchConditions, Context context) {
return updateSnapshotSync(snapshotName, matchConditions, ConfigurationSnapshotStatus.ARCHIVED, serviceClient,
context);
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.recoverSnapshotByName -->
* <pre>
* String snapshotName = "&
* ConfigurationSnapshot recoveredSnapshot = client.recoverSnapshot&
* System.out.printf&
* recoveredSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.recoverSnapshotByName -->
*
* @param snapshotName the snapshot name.
* @return A {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ConfigurationSnapshot recoverSnapshot(String snapshotName) {
return updateSnapshotSync(snapshotName, null, ConfigurationSnapshotStatus.READY, serviceClient,
Context.NONE).getValue();
}
/**
* Update a snapshot status from {@link ConfigurationSnapshotStatus
*
* <p>
* To turn on using 'if-match' header, set the second parameter 'ifUnchanged' to true.
* It used to perform an operation only if the targeted resource's ETag matches the value provided.
* Otherwise, it will throw an exception '412 Precondition Failed'.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.recoverSnapshotMaxOverload -->
* <pre>
* String snapshotName = "&
* MatchConditions matchConditions = new MatchConditions&
* Context ctx = new Context&
*
* ConfigurationSnapshot recoveredSnapshot = client.recoverSnapshotWithResponse&
* .getValue&
* System.out.printf&
* recoveredSnapshot.getName&
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.recoverSnapshotMaxOverload -->
*
* @param snapshotName the snapshot name.
* @param matchConditions Specifies HTTP options for conditional requests.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ConfigurationSnapshot> recoverSnapshotWithResponse(String snapshotName,
MatchConditions matchConditions, Context context) {
return updateSnapshotSync(snapshotName, matchConditions, ConfigurationSnapshotStatus.READY, serviceClient,
context);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.listSnapshots -->
* <pre>
* String snapshotNameFilter = "&
* client.listSnapshots&
* .forEach&
* System.out.printf&
* snapshotResult.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.listSnapshots -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @return A {@link PagedIterable} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector) {
return listSnapshots(selector, Context.NONE);
}
/**
* List snapshots by given {@link SnapshotSelector}.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.data.appconfiguration.configurationclient.listSnapshotsMaxOverload -->
* <pre>
* String snapshotNameFilter = "&
* Context ctx = new Context&
*
* client.listSnapshots&
* .forEach&
* System.out.printf&
* snapshotResult.getName&
* &
* </pre>
* <!-- end com.azure.data.appconfiguration.configurationclient.listSnapshotsMaxOverload -->
*
* @param selector Optional. Used to filter {@link ConfigurationSnapshot} from the service.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link PagedIterable} of {@link ConfigurationSnapshot}.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ConfigurationSnapshot> listSnapshots(SnapshotSelector selector, Context context) {
return new PagedIterable<>(
() -> serviceClient.getSnapshotsSinglePage(
selector == null ? null : selector.getNameFilter(),
null,
selector == null ? null : selector.getFields(),
selector == null ? null : selector.getStatus(),
enableSyncRestProxy(addTracingNamespace(context))),
nextLink -> serviceClient.getSnapshotsNextSinglePage(nextLink,
enableSyncRestProxy(addTracingNamespace(context))));
}
/**
* Adds an external synchronization token to ensure service requests receive up-to-date values.
*
* @param token an external synchronization token to ensure service requests receive up-to-date values.
* @throws NullPointerException if the given token is null.
*/
public void updateSyncToken(String token) {
syncTokenPolicy.updateSyncToken(token);
}
} |
logging happens inside isMatchingEvent already | public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final Instant now = Instant.now();
final List<String> partitionIds = producer.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer = toClose(builder.buildAsyncConsumerClient());
toClose(consumer.receiveFromPartition(id, EventPosition.fromEnqueuedTime(now))
.subscribe(partitionEvent -> {
if (isMatchingEvent(partitionEvent.getData(), messageValue)) {
countDownLatch.countDown();
}
}));
}
producer.send(batch.getEvents(), sendOptions).block();
assertTrue(countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS));
} | if (isMatchingEvent(partitionEvent.getData(), messageValue)) { | public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final Instant now = Instant.now();
final List<String> partitionIds = producer.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer = toClose(builder.buildAsyncConsumerClient());
toClose(consumer.receiveFromPartition(id, EventPosition.fromEnqueuedTime(now))
.subscribe(partitionEvent -> {
if (isMatchingEvent(partitionEvent.getData(), messageValue)) {
countDownLatch.countDown();
}
}));
}
producer.send(batch.getEvents(), sendOptions).block();
assertTrue(countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS));
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private static final EventHubsProducerInstrumentation DEFAULT_INSTRUMENTATION = new EventHubsProducerInstrumentation(null, null, "fqdn", "entity");
private EventHubProducerAsyncClient producer;
private EventHubClientBuilder builder;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
protected void beforeTest() {
MockitoAnnotations.initMocks(this);
builder = createBuilder()
.shareConnection()
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.prefetchCount(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
producer = toClose(builder
.retryOptions(RETRY_OPTIONS)
.buildAsyncProducerClient());
}
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
DEFAULT_INSTRUMENTATION);
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.expectComplete()
.verify(TIMEOUT);
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private static final EventHubsProducerInstrumentation DEFAULT_INSTRUMENTATION = new EventHubsProducerInstrumentation(null, null, "fqdn", "entity");
private EventHubProducerAsyncClient producer;
private EventHubClientBuilder builder;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
protected void beforeTest() {
MockitoAnnotations.initMocks(this);
builder = createBuilder()
.shareConnection()
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.prefetchCount(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
producer = toClose(builder
.retryOptions(RETRY_OPTIONS)
.buildAsyncProducerClient());
}
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
DEFAULT_INSTRUMENTATION);
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.expectComplete()
.verify(TIMEOUT);
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} |
this supposes to fix transient timeout (sets timeout to 3 sec instead of 1 min) so that tests have a chance to retry things | protected void beforeTest() {
MockitoAnnotations.initMocks(this);
builder = createBuilder()
.shareConnection()
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.prefetchCount(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
producer = toClose(builder
.retryOptions(RETRY_OPTIONS)
.buildAsyncProducerClient());
} | .retryOptions(RETRY_OPTIONS) | protected void beforeTest() {
MockitoAnnotations.initMocks(this);
builder = createBuilder()
.shareConnection()
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.prefetchCount(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
producer = toClose(builder
.retryOptions(RETRY_OPTIONS)
.buildAsyncProducerClient());
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private static final EventHubsProducerInstrumentation DEFAULT_INSTRUMENTATION = new EventHubsProducerInstrumentation(null, null, "fqdn", "entity");
private EventHubProducerAsyncClient producer;
private EventHubClientBuilder builder;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final Instant now = Instant.now();
final List<String> partitionIds = producer.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer = toClose(builder.buildAsyncConsumerClient());
toClose(consumer.receiveFromPartition(id, EventPosition.fromEnqueuedTime(now))
.subscribe(partitionEvent -> {
if (isMatchingEvent(partitionEvent.getData(), messageValue)) {
countDownLatch.countDown();
}
}));
}
producer.send(batch.getEvents(), sendOptions).block();
assertTrue(countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS));
}
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
DEFAULT_INSTRUMENTATION);
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.expectComplete()
.verify(TIMEOUT);
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private static final EventHubsProducerInstrumentation DEFAULT_INSTRUMENTATION = new EventHubsProducerInstrumentation(null, null, "fqdn", "entity");
private EventHubProducerAsyncClient producer;
private EventHubClientBuilder builder;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.expectComplete()
.verify(TIMEOUT);
}
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null,
PARTITION_KEY, contextProvider, DEFAULT_INSTRUMENTATION);
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final Instant now = Instant.now();
final List<String> partitionIds = producer.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer = toClose(builder.buildAsyncConsumerClient());
toClose(consumer.receiveFromPartition(id, EventPosition.fromEnqueuedTime(now))
.subscribe(partitionEvent -> {
if (isMatchingEvent(partitionEvent.getData(), messageValue)) {
countDownLatch.countDown();
}
}));
}
producer.send(batch.getEvents(), sendOptions).block();
assertTrue(countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS));
}
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
DEFAULT_INSTRUMENTATION);
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.expectComplete()
.verify(TIMEOUT);
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} |
This should use `TestResourceNamer` to get a now datetime, I believe this may break with the current design | public void canUseSasTokenToCreateValidTableClient() {
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
} | final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); | public void canUseSasTokenToCreateValidTableClient() {
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
} | class TableServiceClientTest extends TableServiceClientTestBase {
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceClient serviceClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
@Override
protected void beforeTest() {
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildClient();
}
@Test
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTable(tableName));
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
String secondTableName = testResourceNamer.randomName("secondTableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceClient tableServiceClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
assertNotNull(tableServiceClient.createTable(tableName));
assertNotNull(tableServiceClient.createTable(secondTableName));
}
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableWithResponse(tableName, null, null).getValue());
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertThrows(TableServiceException.class, () -> serviceClient.createTable(tableName));
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName);
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertDoesNotThrow(() -> serviceClient.deleteTable(tableName));
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
assertDoesNotThrow(() -> serviceClient.createTable(tableName));
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName);
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceListTables() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
Iterator<PagedResponse<TableItem>> iterator = serviceClient.listTables().iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertTrue(2 <= iterator.next().getValue().size());
}
@Test
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.listTables(options, null, null)
.forEach(tableItem -> assertEquals(tableName, tableItem.getName()));
}
@Test
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.createTable(tableName3);
Iterator<PagedResponse<TableItem>> iterator =
serviceClient.listTables(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertEquals(2, iterator.next().getValue().size());
}
@Test
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
TableClient tableClient = serviceClient.getTableClient(tableName);
TableClientTest.getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("r");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
@Test
public void setGetProperties() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and getting properties is not supported on Cosmos endpoints.");
TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
.setDaysToRetain(5)
.setEnabled(true);
TableServiceLogging logging = new TableServiceLogging()
.setReadLogged(true)
.setAnalyticsVersion("1.0")
.setRetentionPolicy(retentionPolicy);
List<TableServiceCorsRule> corsRules = new ArrayList<>();
corsRules.add(new TableServiceCorsRule()
.setAllowedMethods("GET,PUT,HEAD")
.setAllowedOrigins("*")
.setAllowedHeaders("x-ms-version")
.setExposedHeaders("x-ms-client-request-id")
.setMaxAgeInSeconds(10));
TableServiceMetrics hourMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceMetrics minuteMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceProperties sentProperties = new TableServiceProperties()
.setLogging(logging)
.setCorsRules(corsRules)
.setMinuteMetrics(minuteMetrics)
.setHourMetrics(hourMetrics);
Response<Void> response = serviceClient.setPropertiesWithResponse(sentProperties, null, null);
assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
assertNotNull(response.getHeaders().getValue("x-ms-version"));
sleepIfRunningAgainstService(20000);
TableServiceProperties retrievedProperties = serviceClient.getProperties();
assertPropertiesEquals(sentProperties, retrievedProperties);
}
@Test
public void getStatistics() throws URISyntaxException {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
String[] hostParts = primaryEndpoint.getHost().split("\\.");
StringJoiner secondaryHostJoiner = new StringJoiner(".");
secondaryHostJoiner.add(hostParts[0] + "-secondary");
for (int i = 1; i < hostParts.length; i++) {
secondaryHostJoiner.add(hostParts[i]);
}
String secondaryEndpoint = primaryEndpoint.getScheme() + ":
TableServiceClient secondaryClient = new TableServiceClientBuilder()
.endpoint(secondaryEndpoint)
.serviceVersion(serviceClient.getServiceVersion())
.pipeline(serviceClient.getHttpPipeline())
.buildClient();
TableServiceStatistics statistics = secondaryClient.getStatistics();
assertNotNull(statistics);
assertNotNull(statistics.getGeoReplication());
assertNotNull(statistics.getGeoReplication().getStatus());
assertNotNull(statistics.getGeoReplication().getLastSyncTime());
}
} | class TableServiceClientTest extends TableServiceClientTestBase {
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceClient serviceClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
@Override
protected void beforeTest() {
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildClient();
}
@Test
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTable(tableName));
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
String secondTableName = testResourceNamer.randomName("secondTableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceClient tableServiceClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
assertNotNull(tableServiceClient.createTable(tableName));
assertNotNull(tableServiceClient.createTable(secondTableName));
}
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableWithResponse(tableName, null, null).getValue());
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertThrows(TableServiceException.class, () -> serviceClient.createTable(tableName));
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName);
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertDoesNotThrow(() -> serviceClient.deleteTable(tableName));
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
assertDoesNotThrow(() -> serviceClient.createTable(tableName));
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName);
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceListTables() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
Iterator<PagedResponse<TableItem>> iterator = serviceClient.listTables().iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertTrue(2 <= iterator.next().getValue().size());
}
@Test
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.listTables(options, null, null)
.forEach(tableItem -> assertEquals(tableName, tableItem.getName()));
}
@Test
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.createTable(tableName3);
Iterator<PagedResponse<TableItem>> iterator =
serviceClient.listTables(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertEquals(2, iterator.next().getValue().size());
}
@Test
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
TableClient tableClient = serviceClient.getTableClient(tableName);
TableClientTest.getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("r");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
@Test
public void setGetProperties() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and getting properties is not supported on Cosmos endpoints.");
TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
.setDaysToRetain(5)
.setEnabled(true);
TableServiceLogging logging = new TableServiceLogging()
.setReadLogged(true)
.setAnalyticsVersion("1.0")
.setRetentionPolicy(retentionPolicy);
List<TableServiceCorsRule> corsRules = new ArrayList<>();
corsRules.add(new TableServiceCorsRule()
.setAllowedMethods("GET,PUT,HEAD")
.setAllowedOrigins("*")
.setAllowedHeaders("x-ms-version")
.setExposedHeaders("x-ms-client-request-id")
.setMaxAgeInSeconds(10));
TableServiceMetrics hourMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceMetrics minuteMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceProperties sentProperties = new TableServiceProperties()
.setLogging(logging)
.setCorsRules(corsRules)
.setMinuteMetrics(minuteMetrics)
.setHourMetrics(hourMetrics);
Response<Void> response = serviceClient.setPropertiesWithResponse(sentProperties, null, null);
assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
assertNotNull(response.getHeaders().getValue("x-ms-version"));
sleepIfRunningAgainstService(20000);
TableServiceProperties retrievedProperties = serviceClient.getProperties();
assertPropertiesEquals(sentProperties, retrievedProperties);
}
@Test
public void getStatistics() throws URISyntaxException {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
String[] hostParts = primaryEndpoint.getHost().split("\\.");
StringJoiner secondaryHostJoiner = new StringJoiner(".");
secondaryHostJoiner.add(hostParts[0] + "-secondary");
for (int i = 1; i < hostParts.length; i++) {
secondaryHostJoiner.add(hostParts[i]);
}
String secondaryEndpoint = primaryEndpoint.getScheme() + ":
TableServiceClient secondaryClient = new TableServiceClientBuilder()
.endpoint(secondaryEndpoint)
.serviceVersion(serviceClient.getServiceVersion())
.pipeline(serviceClient.getHttpPipeline())
.buildClient();
TableServiceStatistics statistics = secondaryClient.getStatistics();
assertNotNull(statistics);
assertNotNull(statistics.getGeoReplication());
assertNotNull(statistics.getGeoReplication().getStatus());
assertNotNull(statistics.getGeoReplication().getLastSyncTime());
}
} |
We should update this to take today's date (`OffsetDateTime.now()`) and then add some arbitrary amount of time to it instead of hard-coding some datetime. | public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
} | final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC); | public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
} | class TableAsyncClientTest extends TableClientTestBase {
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(100);
private TableAsyncClient tableClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildAsyncClient();
tableClient.createTable().block(DEFAULT_TIMEOUT);
}
@Test
public void createTable() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableAsyncClient tableClient2 = getClientBuilder(tableName2, connectionString).buildAsyncClient();
StepVerifier.create(tableClient2.createTable())
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void createTableWithMultipleTenants() {
Assumptions.assumeTrue(tableClient.getTableEndpoint().contains("core.windows.net")
&& tableClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
final String tableName2 = testResourceNamer.randomName("tableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableAsyncClient tableClient2 =
getClientBuilder(tableName2, Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
StepVerifier.create(tableClient2.createTable())
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
StepVerifier.create(tableClient2.createEntity(tableEntity))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void createTableWithResponse() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableAsyncClient tableClient2 = getClientBuilder(tableName2, connectionString).buildAsyncClient();
final int expectedStatusCode = 204;
StepVerifier.create(tableClient2.createTableWithResponse())
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void createEntity() {
createEntityImpl("partitionKey", "rowKey");
}
@Test
public void createEntityWithSingleQuotesInPartitionKey() {
createEntityImpl("partition'Key", "rowKey");
}
@Test
public void createEntityWithSingleQuotesInRowKey() {
createEntityImpl("partitionKey", "row'Key");
}
private void createEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
StepVerifier.create(tableClient.createEntity(tableEntity))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void createEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void createEntityWithAllSupportedDataTypes() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final boolean booleanValue = true;
final byte[] binaryValue = "Test value".getBytes();
final Date dateValue = new Date();
final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
final double doubleValue = 2.0d;
final UUID guidValue = UUID.randomUUID();
final int int32Value = 1337;
final long int64Value = 1337L;
final String stringValue = "This is table entity";
tableEntity.addProperty("BinaryTypeProperty", binaryValue);
tableEntity.addProperty("BooleanTypeProperty", booleanValue);
tableEntity.addProperty("DateTypeProperty", dateValue);
tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
tableEntity.addProperty("DoubleTypeProperty", doubleValue);
tableEntity.addProperty("GuidTypeProperty", guidValue);
tableEntity.addProperty("Int32TypeProperty", int32Value);
tableEntity.addProperty("Int64TypeProperty", int64Value);
tableEntity.addProperty("StringTypeProperty", stringValue);
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
assertTrue(properties.get("Int64TypeProperty") instanceof Long);
assertTrue(properties.get("StringTypeProperty") instanceof String);
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/*@Test
public void createEntitySubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
@Test
public void deleteTable() {
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteNonExistingTable() {
tableClient.deleteTable().block();
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteTableWithResponse() {
final int expectedStatusCode = 204;
StepVerifier.create(tableClient.deleteTableWithResponse())
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteNonExistingTableWithResponse() {
final int expectedStatusCode = 404;
tableClient.deleteTableWithResponse().block();
StepVerifier.create(tableClient.deleteTableWithResponse())
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteEntity() {
deleteEntityImpl("partitionKey", "rowKey");
}
@Test
public void deleteEntityWithSingleQuotesInPartitionKey() {
deleteEntityImpl("partition'Key", "rowKey");
}
@Test
public void deleteEntityWithSingleQuotesInRowKey() {
deleteEntityImpl("partitionKey", "row'Key");
}
private void deleteEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteNonExistingEntity() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteNonExistingEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 404;
StepVerifier.create(tableClient.deleteEntityWithResponse(entity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteEntityWithResponseMatchETag() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void getEntityWithSingleQuotesInPartitionKey() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer, "partition'Key", "rowKey");
}
@Test
public void getEntityWithSingleQuotesInRowKey() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer, "partitionKey", "row'Key");
}
@Test
public void getEntityWithResponse() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer, "partitionKey", "rowKey");
}
static void getEntityWithResponseAsyncImpl(TableAsyncClient tableClient, TestResourceNamer testResourceNamer,
String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void getEntityWithResponseWithSelect() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.addProperty("Test", "Value");
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
List<String> propertyList = new ArrayList<>();
propertyList.add("Test");
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertNull(entity.getPartitionKey());
assertNull(entity.getRowKey());
assertNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertEquals(entity.getProperties().get("Test"), "Value");
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void updateEntityWithSingleQuotesInPartitionKey() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE, "partition'Key", "rowKey");
}
@Test
public void updateEntityWithSingleQuotesInRowKey() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE, "partitionKey", "row'Key");
}
/*@Test
public void getEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class))
.assertNext(response -> {
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
@Test
public void updateEntityWithResponseReplace() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE, "partitionKey", "rowKey");
}
@Test
public void updateEntityWithResponseMerge() {
updateEntityWithResponseAsync(TableEntityUpdateMode.MERGE, "partitionKey", "rowKey");
}
/**
* In the case of {@link TableEntityUpdateMode
* In the case of {@link TableEntityUpdateMode
*/
void updateEntityWithResponseAsync(TableEntityUpdateMode mode, String partitionKeyPrefix, String rowKeyPrefix) {
final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final int expectedStatusCode = 204;
final String oldPropertyKey = "propertyA";
final String newPropertyKey = "propertyB";
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty(oldPropertyKey, "valueA");
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
createdEntity.getProperties().remove(oldPropertyKey);
createdEntity.addProperty(newPropertyKey, "valueB");
StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, mode, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey(newPropertyKey));
assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/*@Test
public void updateEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity).block(TIMEOUT);
tableEntity.setSubclassProperty("UpdatedValue");
StepVerifier.create(tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
@Test
public void listEntities() {
listEntitiesImpl("partitionKey", "rowKey");
}
@Test
public void listEntitiesWithSingleQuotesInPartitionKey() {
listEntitiesImpl("partition'Key", "rowKey");
}
@Test
public void listEntitiesWithSingleQuotesInRowKey() {
listEntitiesImpl("partitionKey", "row'Key");
}
private void listEntitiesImpl(String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final String rowKeyValue2 = testResourceNamer.randomName(rowKeyPrefix, 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.listEntities())
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void listEntitiesWithFilter() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertEquals(partitionKeyValue, returnEntity.getPartitionKey());
assertEquals(rowKeyValue, returnEntity.getRowKey());
})
.expectNextCount(0)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void listEntitiesWithSelect() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty("propertyC", "valueC")
.addProperty("propertyD", "valueD");
List<String> propertyList = new ArrayList<>();
propertyList.add("propertyC");
ListEntitiesOptions options = new ListEntitiesOptions()
.setSelect(propertyList);
tableClient.createEntity(entity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertNull(returnEntity.getRowKey());
assertNull(returnEntity.getPartitionKey());
assertEquals("valueC", returnEntity.getProperties().get("propertyC"));
assertNull(returnEntity.getProperties().get("propertyD"));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void listEntitiesWithTop() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3)).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/*@Test
public void listEntitiesSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(SampleEntity.class))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
@Test
public void submitTransaction() {
    // Submit a two-create batch, assert the batch (202) and per-operation (204)
    // status codes, then read one entity back to confirm it was persisted.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String firstRowKey = testResourceNamer.randomName("rowKey", 20);
    String secondRowKey = testResourceNamer.randomName("rowKey", 20);
    int expectedBatchStatusCode = 202;
    int expectedOperationStatusCode = 204;

    List<TableTransactionAction> actions = new ArrayList<>();
    actions.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKey, firstRowKey)));
    actions.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKey, secondRowKey)));

    final Response<TableTransactionResult> response =
        tableClient.submitTransactionWithResponse(actions).block(DEFAULT_TIMEOUT);

    assertNotNull(response);
    assertEquals(expectedBatchStatusCode, response.getStatusCode());

    TableTransactionResult transactionResult = response.getValue();
    assertEquals(actions.size(), transactionResult.getTransactionActionResponses().size());
    // Both sub-operations (index 0 and 1) must individually report success.
    for (TableTransactionActionResponse actionResponse : transactionResult.getTransactionActionResponses()) {
        assertEquals(expectedOperationStatusCode, actionResponse.getStatusCode());
    }

    StepVerifier.create(tableClient.getEntityWithResponse(partitionKey, firstRowKey, null))
        .assertNext(getResponse -> {
            final TableEntity fetched = getResponse.getValue();
            assertNotNull(fetched);
            assertEquals(partitionKey, fetched.getPartitionKey());
            assertEquals(firstRowKey, fetched.getRowKey());
            assertNotNull(fetched.getTimestamp());
            assertNotNull(fetched.getETag());
            assertNotNull(fetched.getProperties());
        })
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
// Exercises every transaction action type with plain keys.
@Test
public void submitTransactionAllActions() {
submitTransactionAllActionsImpl("partitionKey", "rowKey");
}
// Single quotes in the partition key must be escaped correctly in batch request URLs.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInPartitionKey() {
submitTransactionAllActionsImpl("partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly in batch request URLs.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInRowKey() {
submitTransactionAllActionsImpl("partitionKey", "row'Key");
}
// Submits one transaction containing every supported action type (CREATE,
// UPSERT_MERGE insert and merge, UPSERT_REPLACE, UPDATE_MERGE, UPDATE_REPLACE,
// DELETE) and asserts the batch returns 202 with 204 for every sub-operation.
// The entities targeted by merge/replace/delete are pre-created so those
// actions have something to act on. Note: the order of randomName calls is
// significant in recorded playback; do not reorder.
private void submitTransactionAllActionsImpl(String partitionKeyPrefix, String rowKeyPrefix) {
String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
String rowKeyValueCreate = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpsertInsert = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpsertMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpsertReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpdateMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpdateReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueDelete = testResourceNamer.randomName(rowKeyPrefix, 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
// Pre-create the entities that the merge, replace, update and delete actions target.
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete)).block(DEFAULT_TIMEOUT);
TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
toUpsertMerge.addProperty("Test", "MergedValue");
TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
toUpsertReplace.addProperty("Test", "ReplacedValue");
TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
toUpdateMerge.addProperty("Test", "MergedValue");
TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
toUpdateReplace.addProperty("Test", "MergedValue");
// Assemble one batch containing every action type.
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValueCreate)));
// UPSERT_MERGE on a non-existent entity acts as an insert.
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValueDelete)));
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.assertNext(response -> {
assertNotNull(response);
assertEquals(expectedBatchStatusCode, response.getStatusCode());
TableTransactionResult result = response.getValue();
assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
// Every sub-operation in the batch must report 204 No Content.
for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
}
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void submitTransactionWithFailingAction() {
    // Deleting an entity that was never created must fail the whole batch with
    // a TableTransactionFailedException that names the offending action.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String existingRowKey = testResourceNamer.randomName("rowKey", 20);
    String missingRowKey = testResourceNamer.randomName("rowKey", 20);

    List<TableTransactionAction> actions = new ArrayList<>();
    actions.add(new TableTransactionAction(TableTransactionActionType.CREATE,
        new TableEntity(partitionKey, existingRowKey)));
    actions.add(new TableTransactionAction(TableTransactionActionType.DELETE,
        new TableEntity(partitionKey, missingRowKey)));

    StepVerifier.create(tableClient.submitTransactionWithResponse(actions))
        .expectErrorMatches(error -> {
            if (!(error instanceof TableTransactionFailedException)) {
                return false;
            }
            String message = error.getMessage();
            return message.contains("An action within the operation failed")
                && message.contains("The failed operation was")
                && message.contains("DeleteEntity")
                && message.contains("partitionKey='" + partitionKey)
                && message.contains("rowKey='" + missingRowKey);
        })
        .verify(DEFAULT_TIMEOUT);
}
// Two CREATEs for the same (partition, row) pair in one batch must be rejected.
// Cosmos reports this as a direct TableServiceException (InvalidDuplicateRow);
// Storage surfaces it as a TableTransactionFailedException naming the action.
@Test
public void submitTransactionWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableServiceException
&& e.getMessage().contains("Status code 400")
&& e.getMessage().contains("InvalidDuplicateRow")
&& e.getMessage().contains("The batch request contains multiple changes with same row key.")
&& e.getMessage().contains("An entity can appear only once in a batch request."))
.verify(DEFAULT_TIMEOUT);
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify(DEFAULT_TIMEOUT);
}
}
// A single transaction may only touch one partition. Both endpoints fail with
// TableTransactionFailedException, but they blame different actions: Cosmos
// reports the first entity, Storage reports the second (the one whose
// partition key differs from the batch's).
@Test
public void submitTransactionWithDifferentPartitionKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify(DEFAULT_TIMEOUT);
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue2)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify(DEFAULT_TIMEOUT);
}
}
@Test
public void generateSasTokenWithMinimumParameters() {
    // A SAS generated from only the required values must carry exactly the
    // version, expiry, table name, permission and protocol query parameters,
    // in that order, followed by the signature.
    final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasPermission permissions = TableSasPermission.parse("r");
    final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;

    final TableSasSignatureValues sasSignatureValues =
        new TableSasSignatureValues(expiryTime, permissions)
            .setProtocol(protocol)
            .setVersion(TableServiceVersion.V2019_02_02.getVersion());

    final String expectedPrefix = "sv=2019-02-02"
        + "&se=2021-12-12T00%3A00%3A00Z"
        + "&tn=" + tableClient.getTableName()
        + "&sp=r"
        + "&spr=https"
        + "&sig=";

    assertTrue(tableClient.generateSas(sasSignatureValues).startsWith(expectedPrefix));
}
// Generates a SAS with every optional value set (start time, IP range and the
// partition/row key range) and checks that all of them are serialized into the
// expected query parameters in the expected order.
@Test
public void generateSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("raud");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final String startPartitionKey = "startPartitionKey";
final String startRowKey = "startRowKey";
final String endPartitionKey = "endPartitionKey";
final String endRowKey = "endRowKey";
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange)
.setStartPartitionKey(startPartitionKey)
.setStartRowKey(startRowKey)
.setEndPartitionKey(endPartitionKey)
.setEndRowKey(endRowKey);
final String sas = tableClient.generateSas(sasSignatureValues);
// The signature itself varies per run, so only the prefix is asserted.
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=raud"
+ "&spk=startPartitionKey"
+ "&srk=startRowKey"
+ "&epk=endPartitionKey"
+ "&erk=endRowKey"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
// Fix: the method carried two consecutive @Test annotations; @Test is not a
// repeatable annotation, so the duplicate is a compile error. One is removed.
@Test
public void setAndListAccessPolicies() {
    // Access policies are a Storage-only feature.
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and listing access policies is not supported on Cosmos endpoints.");

    OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    String permissions = "r";
    TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
        .setStartsOn(startTime)
        .setExpiresOn(expiryTime)
        .setPermissions(permissions);
    String id = "testPolicy";
    TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);

    // Setting a policy returns 204 No Content.
    StepVerifier.create(tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier)))
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);

    // The policy read back must round-trip the id, validity window and permissions.
    StepVerifier.create(tableClient.getAccessPolicies())
        .assertNext(tableAccessPolicies -> {
            assertNotNull(tableAccessPolicies);
            assertNotNull(tableAccessPolicies.getIdentifiers());
            TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
            assertNotNull(signedIdentifier);
            TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
            assertNotNull(accessPolicy);
            assertEquals(startTime, accessPolicy.getStartsOn());
            assertEquals(expiryTime, accessPolicy.getExpiresOn());
            assertEquals(permissions, accessPolicy.getPermissions());
            assertEquals(id, signedIdentifier.getId());
        })
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
// Sets two signed identifiers that share one access policy and verifies that
// both come back, in insertion order, with the policy fields intact.
@Test
public void setAndListMultipleAccessPolicies() {
// Access policies are a Storage-only feature.
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id1 = "testPolicy1";
String id2 = "testPolicy2";
List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
assertEquals(2, tableAccessPolicies.getIdentifiers().size());
assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
// Both identifiers reference the same policy values.
for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
}
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void allowsCreationOfEntityWithEmptyStringPrimaryKey() {
    // Storage (non-Cosmos) endpoints accept empty partition and row keys.
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");

    TableEntity emptyKeyedEntity = new TableEntity("", "");
    emptyKeyedEntity.addProperty("Name", testResourceNamer.randomName("name", 10));

    StepVerifier.create(tableClient.createEntityWithResponse(emptyKeyedEntity))
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
public void allowListEntitiesWithEmptyPrimaryKey() {
    // Storage (non-Cosmos) endpoints accept empty partition and row keys;
    // listing with a filter on the empty keys should find the entity.
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");
    TableEntity entity = new TableEntity("", "");
    String entityName = testResourceNamer.randomName("name", 10);
    entity.addProperty("Name", entityName);
    // Fix: use the shared timeout like every other blocking call in this class
    // so a hung request fails the test instead of blocking forever.
    tableClient.createEntity(entity).block(DEFAULT_TIMEOUT);
    ListEntitiesOptions options = new ListEntitiesOptions();
    options.setFilter("PartitionKey eq '' and RowKey eq ''");
    StepVerifier.create(tableClient.listEntities(options))
        .assertNext(en -> assertEquals(entityName, en.getProperties().get("Name")))
        // Exactly one entity should match the empty-key filter.
        .expectNextCount(0)
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
public void allowDeleteEntityWithEmptyPrimaryKey() {
    // Storage (non-Cosmos) endpoints accept empty partition and row keys;
    // deleting by the empty keys should succeed with 204.
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");
    TableEntity entity = new TableEntity("", "");
    String entityName = testResourceNamer.randomName("name", 10);
    entity.addProperty("Name", entityName);
    // Fix: bound the blocking call and the verification with the shared
    // timeout, matching the convention used by every other test here. The
    // original used block() and verify() with no timeout, which can hang
    // the suite indefinitely on a stuck request.
    tableClient.createEntity(entity).block(DEFAULT_TIMEOUT);
    StepVerifier.create(tableClient.deleteEntityWithResponse("", "", "*", false, null))
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
}
class TableAsyncClientTest extends TableClientTestBase {
// Upper bound applied to every blocking call and StepVerifier in this class.
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(100);
// Client under test; recreated against a fresh table before each test.
private TableAsyncClient tableClient;
// Wraps the given HTTP client so every request is asserted to flow through the
// async code path; no requests are exempted from the check.
protected HttpClient buildAssertingClient(HttpClient httpClient) {
    AssertingHttpClientBuilder assertingBuilder = new AssertingHttpClientBuilder(httpClient)
        .skipRequest((request, context) -> false)
        .assertAsync();
    return assertingBuilder.build();
}
// Creates a fresh, randomly named table before each test and blocks until it
// exists, so every test body starts from an empty table.
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildAsyncClient();
tableClient.createTable().block(DEFAULT_TIMEOUT);
}
@Test
public void createTable() {
    // Creating a brand-new table through a fresh client should emit a non-null
    // result and then complete.
    final String newTableName = testResourceNamer.randomName("tableName", 20);
    final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    final TableAsyncClient newTableClient = getClientBuilder(newTableName, connectionString).buildAsyncClient();

    StepVerifier.create(newTableClient.createTable())
        .assertNext(Assertions::assertNotNull)
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void createTableWithMultipleTenants() {
// Only meaningful against a real Storage endpoint on a service version that
// supports tenant discovery.
Assumptions.assumeTrue(tableClient.getTableEndpoint().contains("core.windows.net")
&& tableClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
final String tableName2 = testResourceNamer.randomName("tableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
// A deliberately random tenant ID forces the credential to follow the
// tenant from the authentication challenge; "*" allows any tenant.
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableAsyncClient tableClient2 =
getClientBuilder(tableName2, Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
// NOTE(review): the default-endpoint literal below appears truncated in
// this copy of the file ("https:) — verify against the original source.
"https:
StepVerifier.create(tableClient2.createTable())
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
StepVerifier.create(tableClient2.createEntity(tableEntity))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void createTableWithResponse() {
    // The WithResponse overload of table creation should surface 204 No Content.
    final String newTableName = testResourceNamer.randomName("tableName", 20);
    final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    final TableAsyncClient newTableClient = getClientBuilder(newTableName, connectionString).buildAsyncClient();

    StepVerifier.create(newTableClient.createTableWithResponse())
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
// Plain-key entity creation.
@Test
public void createEntity() {
createEntityImpl("partitionKey", "rowKey");
}
// Single quotes in the partition key must be escaped correctly in request URLs.
@Test
public void createEntityWithSingleQuotesInPartitionKey() {
createEntityImpl("partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly in request URLs.
@Test
public void createEntityWithSingleQuotesInRowKey() {
createEntityImpl("partitionKey", "row'Key");
}
// Creates an entity with randomized keys built from the given prefixes and
// verifies the operation completes without error.
private void createEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
StepVerifier.create(tableClient.createEntity(tableEntity))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void createEntityWithResponse() {
    // Creating an entity through the WithResponse overload should return
    // 204 No Content.
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String rowKey = testResourceNamer.randomName("rowKey", 20);

    StepVerifier.create(tableClient.createEntityWithResponse(new TableEntity(partitionKey, rowKey)))
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
// Round-trips one property of every supported EDM data type and verifies the
// Java type each deserializes back to. Note that java.util.Date values come
// back as OffsetDateTime.
@Test
public void createEntityWithAllSupportedDataTypes() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final boolean booleanValue = true;
final byte[] binaryValue = "Test value".getBytes();
final Date dateValue = new Date();
final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
final double doubleValue = 2.0d;
final UUID guidValue = UUID.randomUUID();
final int int32Value = 1337;
final long int64Value = 1337L;
final String stringValue = "This is table entity";
tableEntity.addProperty("BinaryTypeProperty", binaryValue);
tableEntity.addProperty("BooleanTypeProperty", booleanValue);
tableEntity.addProperty("DateTypeProperty", dateValue);
tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
tableEntity.addProperty("DoubleTypeProperty", doubleValue);
tableEntity.addProperty("GuidTypeProperty", guidValue);
tableEntity.addProperty("Int32TypeProperty", int32Value);
tableEntity.addProperty("Int64TypeProperty", int64Value);
tableEntity.addProperty("StringTypeProperty", stringValue);
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
// Date properties deserialize as OffsetDateTime, not java.util.Date.
assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
assertTrue(properties.get("Int64TypeProperty") instanceof Long);
assertTrue(properties.get("StringTypeProperty") instanceof String);
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/*@Test
public void createEntitySubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
// Deleting the table created in beforeTest should complete without error.
@Test
public void deleteTable() {
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteNonExistingTable() {
    // Fix: bound the first delete with the shared timeout like every other
    // blocking call in this class; the original used block() with no timeout,
    // which can hang the suite indefinitely on a stuck request.
    tableClient.deleteTable().block(DEFAULT_TIMEOUT);
    // Deleting a table that no longer exists still completes successfully
    // (the client swallows the not-found case for the Mono overload).
    StepVerifier.create(tableClient.deleteTable())
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteTableWithResponse() {
    // Deleting an existing table via the WithResponse overload surfaces
    // 204 No Content.
    StepVerifier.create(tableClient.deleteTableWithResponse())
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
public void deleteNonExistingTableWithResponse() {
    // The WithResponse overload does NOT swallow not-found: a second delete of
    // the same table surfaces 404.
    final int expectedStatusCode = 404;
    // Fix: bound the first delete with the shared timeout like every other
    // blocking call in this class; the original used block() with no timeout.
    tableClient.deleteTableWithResponse().block(DEFAULT_TIMEOUT);
    StepVerifier.create(tableClient.deleteTableWithResponse())
        .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
// Plain-key entity deletion.
@Test
public void deleteEntity() {
deleteEntityImpl("partitionKey", "rowKey");
}
// Single quotes in the partition key must be escaped correctly in request URLs.
@Test
public void deleteEntityWithSingleQuotesInPartitionKey() {
deleteEntityImpl("partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly in request URLs.
@Test
public void deleteEntityWithSingleQuotesInRowKey() {
deleteEntityImpl("partitionKey", "row'Key");
}
// Creates an entity, confirms it exists (with an ETag), then deletes it and
// verifies the delete completes without error.
private void deleteEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
// Deleting an entity that does not exist still completes successfully (the
// Mono overload swallows the not-found case).
@Test
public void deleteNonExistingEntity() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
// Deleting an existing entity via the WithResponse overload surfaces
// 204 No Content (ifUnchanged = false ignores the ETag).
@Test
public void deleteEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
// Unlike the Mono overload, the WithResponse overload surfaces 404 when the
// entity does not exist.
@Test
public void deleteNonExistingEntityWithResponse() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 404;
StepVerifier.create(tableClient.deleteEntityWithResponse(entity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
// With ifUnchanged = true the entity's current ETag is sent as a precondition;
// since the entity is unmodified, the conditional delete succeeds with 204.
@Test
public void deleteEntityWithResponseMatchETag() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
// Single quotes in the partition key must be escaped correctly in request URLs.
@Test
public void getEntityWithSingleQuotesInPartitionKey() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer, "partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly in request URLs.
@Test
public void getEntityWithSingleQuotesInRowKey() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer, "partitionKey", "row'Key");
}
// Plain-key entity retrieval.
@Test
public void getEntityWithResponse() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer, "partitionKey", "rowKey");
}
// Shared implementation: creates an entity, fetches it via the WithResponse
// overload and checks the 200 status plus round-tripped keys and populated
// system properties. Static so it can be reused by sibling test classes.
static void getEntityWithResponseAsyncImpl(TableAsyncClient tableClient, TestResourceNamer testResourceNamer,
String partitionKeyPrefix, String rowKeyPrefix) {
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void getEntityWithResponseWithSelect() {
    // Fetches an entity with a $select projection and verifies only the
    // requested property comes back: keys and timestamp are absent from the
    // projected entity while the ETag is still populated.
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    tableEntity.addProperty("Test", "Value");
    final int expectedStatusCode = 200;
    tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
    List<String> propertyList = new ArrayList<>();
    propertyList.add("Test");
    StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList))
        .assertNext(response -> {
            final TableEntity entity = response.getValue();
            assertEquals(expectedStatusCode, response.getStatusCode());
            assertNotNull(entity);
            assertNull(entity.getPartitionKey());
            assertNull(entity.getRowKey());
            assertNull(entity.getTimestamp());
            assertNotNull(entity.getETag());
            // Fix: assertEquals takes (expected, actual); the original call had
            // the arguments reversed, which garbles the failure message.
            assertEquals("Value", entity.getProperties().get("Test"));
        })
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
// REPLACE-mode update for an entity whose partition key contains a single quote.
@Test
public void updateEntityWithSingleQuotesInPartitionKey() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE, "partition'Key", "rowKey");
}
// REPLACE-mode update for an entity whose row key contains a single quote.
@Test
public void updateEntityWithSingleQuotesInRowKey() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE, "partitionKey", "row'Key");
}
/*@Test
public void getEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class))
.assertNext(response -> {
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
// REPLACE-mode update: the old property should be gone after the update (see the shared impl).
@Test
public void updateEntityWithResponseReplace() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE, "partitionKey", "rowKey");
}
// MERGE-mode update: the old property should survive alongside the new one (see the shared impl).
@Test
public void updateEntityWithResponseMerge() {
updateEntityWithResponseAsync(TableEntityUpdateMode.MERGE, "partitionKey", "rowKey");
}
/**
 * In the case of {@link TableEntityUpdateMode#MERGE}, the updated entity is expected to retain the old
 * property alongside the newly added one. In the case of {@link TableEntityUpdateMode#REPLACE}, only the
 * new property should remain after the update.
 */
// Shared implementation: creates an entity with one property, swaps it for another via the given update
// mode, and asserts that the old property survives only under MERGE.
void updateEntityWithResponseAsync(TableEntityUpdateMode mode, String partitionKeyPrefix, String rowKeyPrefix) {
// MERGE keeps properties absent from the update payload; REPLACE drops them.
final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
final int expectedStatusCode = 204;
final String oldPropertyKey = "propertyA";
final String newPropertyKey = "propertyB";
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty(oldPropertyKey, "valueA");
tableClient.createEntity(tableEntity).block(DEFAULT_TIMEOUT);
// Fetch the created entity so the update carries a valid ETag.
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(DEFAULT_TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
createdEntity.getProperties().remove(oldPropertyKey);
createdEntity.addProperty(newPropertyKey, "valueB");
// ifUnchanged=true makes the service enforce the ETag precondition.
StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, mode, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey(newPropertyKey));
assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/*@Test
public void updateEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity).block(TIMEOUT);
tableEntity.setSubclassProperty("UpdatedValue");
StepVerifier.create(tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
// Baseline listing with plain keys.
@Test
public void listEntities() {
listEntitiesImpl("partitionKey", "rowKey");
}
// Listing when the partition key contains a single quote.
@Test
public void listEntitiesWithSingleQuotesInPartitionKey() {
listEntitiesImpl("partition'Key", "rowKey");
}
// Listing when the row key contains a single quote.
@Test
public void listEntitiesWithSingleQuotesInRowKey() {
listEntitiesImpl("partitionKey", "row'Key");
}
// Shared implementation: seeds one partition with two distinct rows built from the given prefixes,
// then expects listEntities() to emit at least those two entities.
private void listEntitiesImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    final String partitionKey = testResourceNamer.randomName(partitionKeyPrefix, 20);
    final String firstRowKey = testResourceNamer.randomName(rowKeyPrefix, 20);
    final String secondRowKey = testResourceNamer.randomName(rowKeyPrefix, 20);
    for (String rowKey : new String[] {firstRowKey, secondRowKey}) {
        tableClient.createEntity(new TableEntity(partitionKey, rowKey)).block(DEFAULT_TIMEOUT);
    }
    StepVerifier.create(tableClient.listEntities())
        .expectNextCount(2)
        .thenConsumeWhile(x -> true)
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
/**
 * Verifies that an OData filter on RowKey returns exactly the one matching entity
 * out of the two created in the same partition.
 */
@Test
public void listEntitiesWithFilter() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertEquals(partitionKeyValue, returnEntity.getPartitionKey());
assertEquals(rowKeyValue, returnEntity.getRowKey());
})
// No further entities should match the filter.
.expectNextCount(0)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * Verifies that a select projection on listEntities returns only the requested property:
 * keys are null and the non-selected property is absent.
 */
@Test
public void listEntitiesWithSelect() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty("propertyC", "valueC")
.addProperty("propertyD", "valueD");
// Project only "propertyC"; "propertyD" must not come back.
List<String> propertyList = new ArrayList<>();
propertyList.add("propertyC");
ListEntitiesOptions options = new ListEntitiesOptions()
.setSelect(propertyList);
tableClient.createEntity(entity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertNull(returnEntity.getRowKey());
assertNull(returnEntity.getPartitionKey());
assertEquals("valueC", returnEntity.getProperties().get("propertyC"));
assertNull(returnEntity.getProperties().get("propertyD"));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * Creates three entities in one partition and lists them with setTop(2); at least two entities are
 * expected up front, with any remainder drained by thenConsumeWhile.
 */
@Test
public void listEntitiesWithTop() {
    final String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    final String firstRowKey = testResourceNamer.randomName("rowKey", 20);
    final String secondRowKey = testResourceNamer.randomName("rowKey", 20);
    final String thirdRowKey = testResourceNamer.randomName("rowKey", 20);
    final ListEntitiesOptions listOptions = new ListEntitiesOptions().setTop(2);
    for (String rowKey : new String[] {firstRowKey, secondRowKey, thirdRowKey}) {
        tableClient.createEntity(new TableEntity(partitionKey, rowKey)).block(DEFAULT_TIMEOUT);
    }
    StepVerifier.create(tableClient.listEntities(listOptions))
        .expectNextCount(2)
        .thenConsumeWhile(x -> true)
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
/*@Test
public void listEntitiesSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(SampleEntity.class))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}*/
/**
 * Submits a transactional batch of two CREATE actions and verifies the batch-level 202 status,
 * the per-action 204 statuses, and that the first entity is actually persisted.
 */
@Test
public void submitTransaction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
final Response<TableTransactionResult> result =
tableClient.submitTransactionWithResponse(transactionalBatch).block(DEFAULT_TIMEOUT);
assertNotNull(result);
assertEquals(expectedBatchStatusCode, result.getStatusCode());
// One sub-response per submitted action, each with its own status code.
assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(0).getStatusCode());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(1).getStatusCode());
// Confirm the transaction really committed by reading one entity back.
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertNotNull(entity);
assertEquals(partitionKeyValue, entity.getPartitionKey());
assertEquals(rowKeyValue, entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
// All transaction action types with plain keys.
@Test
public void submitTransactionAllActions() {
submitTransactionAllActionsImpl("partitionKey", "rowKey");
}
// All transaction action types when the partition key contains a single quote.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInPartitionKey() {
submitTransactionAllActionsImpl("partition'Key", "rowKey");
}
// All transaction action types when the row key contains a single quote.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInRowKey() {
submitTransactionAllActionsImpl("partitionKey", "row'Key");
}
/**
 * Shared implementation covering every transaction action type (CREATE, UPSERT_MERGE both as insert and
 * as merge, UPSERT_REPLACE, UPDATE_MERGE, UPDATE_REPLACE, DELETE) in a single batch. Entities targeted by
 * merge/replace/delete actions are created up front; the batch is expected to return 202 with a 204 per action.
 */
private void submitTransactionAllActionsImpl(String partitionKeyPrefix, String rowKeyPrefix) {
String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
String rowKeyValueCreate = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpsertInsert = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpsertMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpsertReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpdateMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueUpdateReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
String rowKeyValueDelete = testResourceNamer.randomName(rowKeyPrefix, 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
// Pre-create the entities that the merge/replace/update/delete actions will target.
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace)).block(DEFAULT_TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete)).block(DEFAULT_TIMEOUT);
TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
toUpsertMerge.addProperty("Test", "MergedValue");
TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
toUpsertReplace.addProperty("Test", "ReplacedValue");
TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
toUpdateMerge.addProperty("Test", "MergedValue");
TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
toUpdateReplace.addProperty("Test", "MergedValue");
// One action of each type; UPSERT_MERGE appears twice to cover both the insert and the merge path.
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValueCreate)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValueDelete)));
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.assertNext(response -> {
assertNotNull(response);
assertEquals(expectedBatchStatusCode, response.getStatusCode());
TableTransactionResult result = response.getValue();
assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
}
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * A DELETE for an entity that was never created must fail the whole batch with a
 * TableTransactionFailedException whose message identifies the failing action and its keys.
 */
@Test
public void submitTransactionWithFailingAction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValue)));
// rowKeyValue2 was never created, so this DELETE is the failing action.
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValue2)));
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("DeleteEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify(DEFAULT_TIMEOUT);
}
/**
 * Two CREATE actions for the same partition/row key in one batch must fail. Cosmos rejects this with a
 * 400 "InvalidDuplicateRow" TableServiceException, while Azure Storage surfaces it as a
 * TableTransactionFailedException naming the duplicate action.
 */
@Test
public void submitTransactionWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableServiceException
&& e.getMessage().contains("Status code 400")
&& e.getMessage().contains("InvalidDuplicateRow")
&& e.getMessage().contains("The batch request contains multiple changes with same row key.")
&& e.getMessage().contains("An entity can appear only once in a batch request."))
.verify(DEFAULT_TIMEOUT);
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify(DEFAULT_TIMEOUT);
}
}
/**
 * All actions in a batch must share one partition key; mixing two partitions must fail with a
 * TableTransactionFailedException. Note the endpoints blame different actions: Cosmos reports the
 * first entity's keys, Azure Storage reports the second's.
 */
@Test
public void submitTransactionWithDifferentPartitionKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify(DEFAULT_TIMEOUT);
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue2)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify(DEFAULT_TIMEOUT);
}
}
/**
 * Generates a read-only, HTTPS-only SAS pinned to service version 2019-02-02 and verifies the token's
 * query-string prefix (everything up to the signature, which varies with the account key).
 */
@Test
public void generateSasTokenWithMinimumParameters() {
    final OffsetDateTime expiry = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasSignatureValues signatureValues =
        new TableSasSignatureValues(expiry, TableSasPermission.parse("r"))
            .setProtocol(TableSasProtocol.HTTPS_ONLY)
            .setVersion(TableServiceVersion.V2019_02_02.getVersion());
    final String sas = tableClient.generateSas(signatureValues);
    final String expectedPrefix = "sv=2019-02-02"
        + "&se=2021-12-12T00%3A00%3A00Z"
        + "&tn=" + tableClient.getTableName()
        + "&sp=r"
        + "&spr=https"
        + "&sig=";
    assertTrue(sas.startsWith(expectedPrefix));
}
/**
 * Generates a SAS with every optional parameter set (start time, IP range, partition/row key range,
 * HTTPS+HTTP protocol) and verifies the resulting query-string prefix up to the signature.
 */
@Test
public void generateSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("raud");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final String startPartitionKey = "startPartitionKey";
final String startRowKey = "startRowKey";
final String endPartitionKey = "endPartitionKey";
final String endRowKey = "endRowKey";
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange)
.setStartPartitionKey(startPartitionKey)
.setStartRowKey(startRowKey)
.setEndPartitionKey(endPartitionKey)
.setEndRowKey(endRowKey);
final String sas = tableClient.generateSas(sasSignatureValues);
// The signature itself depends on the account key, so only the prefix is asserted.
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=raud"
+ "&spk=startPartitionKey"
+ "&srk=startRowKey"
+ "&epk=endPartitionKey"
+ "&erk=endRowKey"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
/**
 * Sets a single access policy on the table and verifies it round-trips through getAccessPolicies
 * (same id, start/expiry times and permissions). Skipped on Cosmos endpoints, which do not support
 * access policies.
 */
// Fix: the annotation was duplicated ("@Test" twice). JUnit 5's @Test is not @Repeatable, so the
// duplicate is a compile error; exactly one annotation is kept.
@Test
public void setAndListAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id = "testPolicy";
TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier)))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
assertEquals(id, signedIdentifier.getId());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * Sets two access policies sharing the same time window and permissions, then verifies both
 * identifiers round-trip in order with the expected policy contents. Skipped on Cosmos endpoints.
 */
@Test
public void setAndListMultipleAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id1 = "testPolicy1";
String id2 = "testPolicy2";
List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
assertEquals(2, tableAccessPolicies.getIdentifiers().size());
assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
// Both identifiers share the same policy, so the same assertions apply to each.
for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
}
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * Azure Storage permits empty-string partition and row keys; creating such an entity should
 * return 204. Skipped on Cosmos endpoints, which reject empty keys.
 */
@Test
public void allowsCreationOfEntityWithEmptyStringPrimaryKey() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Empty row or partition keys are not supported on Cosmos endpoints.");
String entityName = testResourceNamer.randomName("name", 10);
TableEntity entity = new TableEntity("", "");
entity.addProperty("Name", entityName);
StepVerifier.create(tableClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * Entities with empty-string partition and row keys can be queried back with an exact-match filter.
 * Skipped on Cosmos endpoints, which reject empty keys.
 */
@Test
public void allowListEntitiesWithEmptyPrimaryKey() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Empty row or partition keys are not supported on Cosmos endpoints.");
TableEntity entity = new TableEntity("", "");
String entityName = testResourceNamer.randomName("name", 10);
entity.addProperty("Name", entityName);
// Fix: bound the blocking call with DEFAULT_TIMEOUT like every other test in this class;
// a bare block() waits indefinitely if the service hangs.
tableClient.createEntity(entity).block(DEFAULT_TIMEOUT);
ListEntitiesOptions options = new ListEntitiesOptions();
options.setFilter("PartitionKey eq '' and RowKey eq ''");
StepVerifier.create(tableClient.listEntities(options))
.assertNext(en -> assertEquals(entityName, en.getProperties().get("Name")))
.expectNextCount(0)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
 * Entities with empty-string partition and row keys can be deleted (ETag wildcard "*"), answering 204.
 * Skipped on Cosmos endpoints, which reject empty keys.
 */
@Test
public void allowDeleteEntityWithEmptyPrimaryKey() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Empty row or partition keys are not supported on Cosmos endpoints.");
TableEntity entity = new TableEntity("", "");
String entityName = testResourceNamer.randomName("name", 10);
entity.addProperty("Name", entityName);
// Fix: bound both the blocking create and the StepVerifier with DEFAULT_TIMEOUT, matching the
// rest of the file; the originals (bare block() / verify()) could wait forever on a hung service.
tableClient.createEntity(entity).block(DEFAULT_TIMEOUT);
StepVerifier.create(tableClient.deleteEntityWithResponse("", "", "*", false, null))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
} |
Same comment about using `OffsetDateTime.now()` and adding to it. | public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableClient newTableClient = tableClientBuilder.buildClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, newTableClient.createEntityWithResponse(entity, null, null).getStatusCode());
} | final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC); | public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableClient newTableClient = tableClientBuilder.buildClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
assertEquals(expectedStatusCode, newTableClient.createEntityWithResponse(entity, null, null).getStatusCode());
} | class TableClientTest extends TableClientTestBase {
private TableClient tableClient;
/**
 * Wraps the given HTTP client so the asserting client verifies all calls from this sync test class
 * actually go through the synchronous code path (no requests are skipped).
 */
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
// Per-test setup: build a client against a freshly named table and create it.
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildClient();
tableClient.createTable();
}
// Per-test teardown: remove the table created in beforeTest().
protected void afterTest() {
tableClient.deleteTable();
}
// Creating a second, independently named table should return a non-null result.
@Test
public void createTable() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableClient tableClient2 = getClientBuilder(tableName2, connectionString).buildClient();
assertNotNull(tableClient2.createTable());
}
/**
 * Tests that a table and entity can be created while having a different tenant ID than the one that will be
 * provided in the authentication challenge. Only runs against public Azure Storage endpoints on service
 * version 2020-12-06; in playback a mock credential replaces the real client-secret credential.
 */
@Test
public void createTableWithMultipleTenants() {
Assumptions.assumeTrue(tableClient.getTableEndpoint().contains("core.windows.net")
&& tableClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
final String tableName2 = testResourceNamer.randomName("tableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
// Random tenant id plus additionallyAllowedTenants("*") forces the multi-tenant challenge path.
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
// NOTE(review): the line below appears truncated in this copy ("https: with no closing quote or
// call completion) — verify the endpoint fallback argument against the upstream source.
final TableClient tableClient2 =
getClientBuilder(tableName2, Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
assertNotNull(tableClient2.createTable());
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
assertDoesNotThrow(() -> tableClient2.createEntity(tableEntity));
}
// createTableWithResponse on a fresh table should return HTTP 204 (No Content).
@Test
public void createTableWithResponse() {
    final String tableName2 = testResourceNamer.randomName("tableName", 20);
    final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    final TableClient tableClient2 = getClientBuilder(tableName2, connectionString).buildClient();
    final int expectedStatusCode = 204;
    assertEquals(expectedStatusCode, tableClient2.createTableWithResponse(null, null).getStatusCode());
}
// Plain create with unremarkable keys.
@Test
public void createEntity() {
    createEntityImpl("partitionKey", "rowKey");
}
// Single quotes in the partition key must be escaped correctly on the wire.
@Test
public void createEntityWithSingleQuotesInPartitionKey() {
    createEntityImpl("partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly on the wire.
@Test
public void createEntityWithSingleQuotesInRowKey() {
    createEntityImpl("partitionKey", "row'Key");
}
// Shared helper: builds randomized keys from the given prefixes and verifies entity creation succeeds.
private void createEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    final String pk = testResourceNamer.randomName(partitionKeyPrefix, 20);
    final String rk = testResourceNamer.randomName(rowKeyPrefix, 20);
    assertDoesNotThrow(() -> tableClient.createEntity(new TableEntity(pk, rk)));
}
// createEntityWithResponse should return HTTP 204 (No Content) for a new entity.
@Test
public void createEntityWithResponse() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 204;
    assertEquals(expectedStatusCode, tableClient.createEntityWithResponse(entity, null, null).getStatusCode());
}
// Round-trips one property of every supported EDM data type and verifies the Java type
// each one deserializes back to.
@Test
public void createEntityWithAllSupportedDataTypes() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    final boolean booleanValue = true;
    final byte[] binaryValue = "Test value".getBytes();
    final Date dateValue = new Date();
    final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
    final double doubleValue = 2.0d;
    final UUID guidValue = UUID.randomUUID();
    final int int32Value = 1337;
    final long int64Value = 1337L;
    final String stringValue = "This is table entity";
    tableEntity.addProperty("BinaryTypeProperty", binaryValue);
    tableEntity.addProperty("BooleanTypeProperty", booleanValue);
    tableEntity.addProperty("DateTypeProperty", dateValue);
    tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
    tableEntity.addProperty("DoubleTypeProperty", doubleValue);
    tableEntity.addProperty("GuidTypeProperty", guidValue);
    tableEntity.addProperty("Int32TypeProperty", int32Value);
    tableEntity.addProperty("Int64TypeProperty", int64Value);
    tableEntity.addProperty("StringTypeProperty", stringValue);
    tableClient.createEntity(tableEntity);
    final Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
    final TableEntity entity = response.getValue();
    final Map<String, Object> properties = entity.getProperties();
    assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
    assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
    // A java.util.Date stored as Edm.DateTime is deserialized back as OffsetDateTime.
    assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
    assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
    assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
    assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
    assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
    assertTrue(properties.get("Int64TypeProperty") instanceof Long);
    assertTrue(properties.get("StringTypeProperty") instanceof String);
}
/*@Test
public void createEntitySubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
}*/
// Deleting an existing table should not throw.
@Test
public void deleteTable() {
    assertDoesNotThrow(() -> tableClient.deleteTable());
}
// Deleting an already-deleted table is treated as a no-op and must not throw.
@Test
public void deleteNonExistingTable() {
    tableClient.deleteTable();
    assertDoesNotThrow(() -> tableClient.deleteTable());
}
// deleteTableWithResponse on an existing table should return HTTP 204 (No Content).
@Test
public void deleteTableWithResponse() {
    final int expectedStatusCode = 204;
    assertEquals(expectedStatusCode, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
// Deleting an already-deleted table with a response should surface HTTP 404 (Not Found).
@Test
public void deleteNonExistingTableWithResponse() {
    final int expectedStatusCode = 404;
    tableClient.deleteTableWithResponse(null, null);
    assertEquals(expectedStatusCode, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
// Plain delete with unremarkable keys.
@Test
public void deleteEntity() {
    deleteEntityImpl("partitionKey", "rowKey");
}
// Single quotes in the partition key must be escaped correctly on delete.
@Test
public void deleteEntityWithSingleQuotesInPartitionKey() {
    deleteEntityImpl("partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly on delete.
@Test
public void deleteEntityWithSingleQuotesInRowKey() {
    deleteEntityImpl("partitionKey", "row'Key");
}
// Shared helper: creates an entity, confirms it exists with an eTag, then deletes it without error.
private void deleteEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    final String pk = testResourceNamer.randomName(partitionKeyPrefix, 20);
    final String rk = testResourceNamer.randomName(rowKeyPrefix, 20);
    tableClient.createEntity(new TableEntity(pk, rk));
    final TableEntity fetched = tableClient.getEntity(pk, rk);
    assertNotNull(fetched, "'createdEntity' should not be null.");
    assertNotNull(fetched.getETag(), "'eTag' should not be null.");
    assertDoesNotThrow(() -> tableClient.deleteEntity(pk, rk));
}
// Deleting an entity that was never created is a no-op and must not throw.
@Test
public void deleteNonExistingEntity() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKeyValue, rowKeyValue));
}
// deleteEntityWithResponse (unconditional delete, ifUnchanged=false) should return HTTP 204.
@Test
public void deleteEntityWithResponse() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 204;
    tableClient.createEntity(tableEntity);
    final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
    assertEquals(expectedStatusCode,
        tableClient.deleteEntityWithResponse(createdEntity, false, null, null).getStatusCode());
}
// Deleting a non-existent entity with a response should surface HTTP 404 (Not Found).
@Test
public void deleteNonExistingEntityWithResponse() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 404;
    assertEquals(expectedStatusCode,
        tableClient.deleteEntityWithResponse(entity, false, null, null).getStatusCode());
}
// Conditional delete (ifUnchanged=true) using the fetched entity's eTag should succeed with HTTP 204.
@Test
public void deleteEntityWithResponseMatchETag() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 204;
    tableClient.createEntity(tableEntity);
    final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
    assertEquals(expectedStatusCode,
        tableClient.deleteEntityWithResponse(createdEntity, true, null, null).getStatusCode());
}
// Single quotes in the partition key must be escaped correctly on point reads.
@Test
public void getEntityWithSingleQuotesInPartitionKey() {
    getEntityWithResponseImpl(tableClient, testResourceNamer, "partition'Key", "rowKey");
}
// Single quotes in the row key must be escaped correctly on point reads.
@Test
public void getEntityWithSingleQuotesInRowKey() {
    getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "row'Key");
}
// Plain point read with unremarkable keys.
@Test
public void getEntityWithResponse() {
    getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
// Shared implementation (static so sibling test classes can reuse it): creates an entity,
// reads it back with getEntityWithResponse, and checks status code, keys, and system properties.
static void getEntityWithResponseImpl(TableClient tableClient, TestResourceNamer testResourceNamer,
    String partitionKeyPrefix, String rowKeyPrefix) {
    final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
    final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 200;
    tableClient.createEntity(tableEntity);
    final Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
    final TableEntity entity = response.getValue();
    assertEquals(expectedStatusCode, response.getStatusCode());
    assertNotNull(entity);
    assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
    assertEquals(tableEntity.getRowKey(), entity.getRowKey());
    // Timestamp and eTag are service-assigned; they must be populated on a read-back entity.
    assertNotNull(entity.getTimestamp());
    assertNotNull(entity.getETag());
    assertNotNull(entity.getProperties());
}
// Point read with a $select projection: only the selected property comes back; the key and
// timestamp columns are absent from the projected entity.
@Test
public void getEntityWithResponseWithSelect() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    tableEntity.addProperty("Test", "Value");
    final int expectedStatusCode = 200;
    tableClient.createEntity(tableEntity);
    List<String> propertyList = new ArrayList<>();
    propertyList.add("Test");
    final Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList, null, null);
    final TableEntity entity = response.getValue();
    assertEquals(expectedStatusCode, response.getStatusCode());
    assertNotNull(entity);
    assertNull(entity.getPartitionKey());
    assertNull(entity.getRowKey());
    assertNull(entity.getTimestamp());
    assertNotNull(entity.getETag());
    // Fixed argument order: JUnit's assertEquals takes (expected, actual).
    assertEquals("Value", entity.getProperties().get("Test"));
}
// Merge update against a partition key containing a single quote.
@Test
public void updateEntityWithSingleQuotesInPartitionKey() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.MERGE, testResourceNamer.randomName("partition'Key", 20),
        testResourceNamer.randomName("rowKey", 20));
}
// Merge update against a row key containing a single quote.
@Test
public void updateEntityWithSingleQuotesInRowKey() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.MERGE, testResourceNamer.randomName("partitionKey", 20),
        testResourceNamer.randomName("row'Key", 20));
}
/*@Test
public void getEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class, null, null);
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
}*/
// REPLACE mode: the update overwrites the whole entity, dropping properties not re-sent.
@Test
public void updateEntityWithResponseReplace() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.REPLACE, "partitionKey", "rowKey");
}
// MERGE mode: the update is layered on top of the stored entity, keeping existing properties.
@Test
public void updateEntityWithResponseMerge() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.MERGE, "partitionKey", "rowKey");
}
/**
 * In the case of {@link TableEntityUpdateMode#MERGE}, the original property is expected to still be present.
 * In the case of {@link TableEntityUpdateMode#REPLACE}, the original property is expected to be removed.
 */
// Shared implementation: creates an entity with propertyA, re-fetches it, swaps propertyA for
// propertyB, updates with the given mode, and verifies which properties survive.
void updateEntityWithResponseImpl(TableEntityUpdateMode mode, String partitionKeyPrefix, String rowKeyPrefix) {
    // MERGE keeps the server-side copy of the removed property; REPLACE drops it.
    final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
    final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
    final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
    final int expectedStatusCode = 204;
    final String oldPropertyKey = "propertyA";
    final String newPropertyKey = "propertyB";
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
        .addProperty(oldPropertyKey, "valueA");
    tableClient.createEntity(tableEntity);
    final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
    createdEntity.getProperties().remove(oldPropertyKey);
    createdEntity.addProperty(newPropertyKey, "valueB");
    // ifUnchanged=true: the update is conditional on the eTag fetched above.
    assertEquals(expectedStatusCode,
        tableClient.updateEntityWithResponse(createdEntity, mode, true, null, null).getStatusCode());
    TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    final Map<String, Object> properties = entity.getProperties();
    assertTrue(properties.containsKey(newPropertyKey));
    assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
}
/*@Test
public void updateEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity);
tableEntity.setSubclassProperty("UpdatedValue");
assertEquals(expectedStatusCode,
tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true, null, null)
.getStatusCode()));
TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
}*/
// Plain listing with unremarkable keys.
@Test
public void listEntities() {
    listEntitiesImpl("partitionKey", "rowKey");
}
// Listing must handle entities whose partition key contains a single quote.
@Test
public void listEntitiesWithSingleQuotesInPartitionKey() {
    listEntitiesImpl("partition'Key", "rowKey");
}
// Listing must handle entities whose row key contains a single quote.
@Test
public void listEntitiesWithSingleQuotesInRowKey() {
    listEntitiesImpl("partitionKey", "row'Key");
}
// Shared helper: inserts two entities under one partition and verifies both come back
// in the first page of an unfiltered listing.
private void listEntitiesImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    final String pk = testResourceNamer.randomName(partitionKeyPrefix, 20);
    tableClient.createEntity(new TableEntity(pk, testResourceNamer.randomName(rowKeyPrefix, 20)));
    tableClient.createEntity(new TableEntity(pk, testResourceNamer.randomName(rowKeyPrefix, 20)));
    final Iterator<PagedResponse<TableEntity>> pages =
        tableClient.listEntities().iterableByPage().iterator();
    assertTrue(pages.hasNext());
    assertEquals(2, pages.next().getValue().size());
}
// An OData $filter on RowKey should return only the matching entity out of the two created.
@Test
public void listEntitiesWithFilter() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
    // Every returned entity must carry the filtered row key.
    tableClient.listEntities(options, null, null).forEach(tableEntity -> {
        assertEquals(partitionKeyValue, tableEntity.getPartitionKey());
        assertEquals(rowKeyValue, tableEntity.getRowKey());
    });
}
// A $select projection during listing should return only the chosen property;
// keys and unselected properties are absent.
@Test
public void listEntitiesWithSelect() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
        .addProperty("propertyC", "valueC")
        .addProperty("propertyD", "valueD");
    List<String> propertyList = new ArrayList<>();
    propertyList.add("propertyC");
    ListEntitiesOptions options = new ListEntitiesOptions()
        .setSelect(propertyList);
    tableClient.createEntity(entity);
    Iterator<PagedResponse<TableEntity>> iterator =
        tableClient.listEntities(options, null, null).iterableByPage().iterator();
    assertTrue(iterator.hasNext());
    TableEntity retrievedEntity = iterator.next().getValue().get(0);
    assertNull(retrievedEntity.getPartitionKey());
    assertNull(retrievedEntity.getRowKey());
    assertEquals("valueC", retrievedEntity.getProperties().get("propertyC"));
    assertNull(retrievedEntity.getProperties().get("propertyD"));
}
// With $top=2 and three entities present, the first page should contain exactly two entities.
@Test
public void listEntitiesWithTop() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3));
    Iterator<PagedResponse<TableEntity>> iterator =
        tableClient.listEntities(options, null, null).iterableByPage().iterator();
    assertTrue(iterator.hasNext());
    assertEquals(2, iterator.next().getValue().size());
}
/*@Test
public void listEntitiesSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(SampleEntity.class).iterableByPage().iterator();
assertTrue(iterator.hasNext());
List<TableEntity> retrievedEntities = iterator.next().getValue();
TableEntity retrievedEntity = retrievedEntities.get(0);
TableEntity retrievedEntity2 = retrievedEntities.get(1);
assertEquals(partitionKeyValue, retrievedEntity.getPartitionKey());
assertEquals(rowKeyValue, retrievedEntity.getRowKey());
assertEquals(partitionKeyValue, retrievedEntity2.getPartitionKey());
assertEquals(rowKeyValue2, retrievedEntity2.getRowKey());
}*/
// Submits a two-action CREATE batch: the batch itself answers 202 (Accepted), each sub-operation
// answers 204 (No Content), and the first created entity is readable afterwards.
@Test
public void submitTransaction() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    int expectedBatchStatusCode = 202;
    int expectedOperationStatusCode = 204;
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
    final Response<TableTransactionResult> result =
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    assertNotNull(result);
    assertEquals(expectedBatchStatusCode, result.getStatusCode());
    assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
    assertEquals(expectedOperationStatusCode,
        result.getValue().getTransactionActionResponses().get(0).getStatusCode());
    assertEquals(expectedOperationStatusCode,
        result.getValue().getTransactionActionResponses().get(1).getStatusCode());
    final Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
    final TableEntity entity = response.getValue();
    assertNotNull(entity);
    assertEquals(partitionKeyValue, entity.getPartitionKey());
    assertEquals(rowKeyValue, entity.getRowKey());
    assertNotNull(entity.getTimestamp());
    assertNotNull(entity.getETag());
    assertNotNull(entity.getProperties());
}
// Exercises every transaction action type with unremarkable keys.
@Test
public void submitTransactionAllActions() {
    // Call the shared implementation directly; the previous Runnable wrapper added no value.
    submitTransactionAllActionsImpl("partitionKey", "rowKey");
}
// Exercises every transaction action type with a single quote in the partition key.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInPartitionKey() {
    submitTransactionAllActionsImpl("partition'Key", "rowKey");
}
// Exercises every transaction action type with a single quote in the row key.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInRowKey() {
    submitTransactionAllActionsImpl("partitionKey", "row'Key");
}
// Shared implementation: pre-creates the entities the update/delete actions need, then submits
// one batch containing every action type (CREATE, UPSERT_MERGE x2, UPSERT_REPLACE, UPDATE_MERGE,
// UPDATE_REPLACE, DELETE) and verifies the batch answers 202 with 204 per sub-operation.
private void submitTransactionAllActionsImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
    String rowKeyValueCreate = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rowKeyValueUpsertInsert = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rowKeyValueUpsertMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rowKeyValueUpsertReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rowKeyValueUpdateMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rowKeyValueUpdateReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rowKeyValueDelete = testResourceNamer.randomName(rowKeyPrefix, 20);
    int expectedBatchStatusCode = 202;
    int expectedOperationStatusCode = 204;
    // Seed entities so merge/replace/delete actions have something to operate on.
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace));
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete));
    TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
    toUpsertMerge.addProperty("Test", "MergedValue");
    TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
    toUpsertReplace.addProperty("Test", "ReplacedValue");
    TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
    toUpdateMerge.addProperty("Test", "MergedValue");
    TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
    toUpdateReplace.addProperty("Test", "MergedValue");
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
        new TableEntity(partitionKeyValue, rowKeyValueCreate)));
    // UPSERT_MERGE on a missing entity acts as an insert.
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
        new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
        new TableEntity(partitionKeyValue, rowKeyValueDelete)));
    final Response<TableTransactionResult> response =
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    assertNotNull(response);
    assertEquals(expectedBatchStatusCode, response.getStatusCode());
    TableTransactionResult result = response.getValue();
    assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
    for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
        assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
    }
}
// A DELETE of a non-existent entity inside a batch must fail the whole transaction; the
// exception message should identify the failing operation and its keys.
@Test
public void submitTransactionWithFailingAction() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
        new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
        new TableEntity(partitionKeyValue, rowKeyValue2)));
    try {
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    } catch (TableTransactionFailedException e) {
        assertTrue(e.getMessage().contains("An action within the operation failed"));
        assertTrue(e.getMessage().contains("The failed operation was"));
        assertTrue(e.getMessage().contains("DeleteEntity"));
        assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
        assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue2));
        return;
    }
    // Reaching here means no exception was thrown — the batch unexpectedly succeeded.
    fail();
}
// Two CREATEs with identical keys in one batch must fail. Storage surfaces this as a
// TableTransactionFailedException; Cosmos rejects it outright with a 400 "InvalidDuplicateRow".
@Test
public void submitTransactionWithSameRowKeys() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    try {
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    } catch (TableTransactionFailedException e) {
        assertTrue(e.getMessage().contains("An action within the operation failed"));
        assertTrue(e.getMessage().contains("The failed operation was"));
        assertTrue(e.getMessage().contains("CreateEntity"));
        assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
        assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue));
        return;
    } catch (TableServiceException e) {
        // Only the Cosmos endpoint reports duplicates this way.
        assertTrue(IS_COSMOS_TEST);
        assertEquals(400, e.getResponse().getStatusCode());
        assertTrue(e.getMessage().contains("InvalidDuplicateRow"));
        return;
    }
    fail();
}
// A batch spanning two partition keys must fail: all actions in a transaction share one
// partition. Cosmos blames the first action; Storage blames the second (offending) one.
@Test
public void submitTransactionWithDifferentPartitionKeys() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
    try {
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    } catch (TableTransactionFailedException e) {
        // The common assertions were duplicated across both branches before; only the
        // expected failing keys differ between Cosmos and Storage.
        String failedPartitionKey = IS_COSMOS_TEST ? partitionKeyValue : partitionKeyValue2;
        String failedRowKey = IS_COSMOS_TEST ? rowKeyValue : rowKeyValue2;
        assertTrue(e.getMessage().contains("An action within the operation failed"));
        assertTrue(e.getMessage().contains("The failed operation was"));
        assertTrue(e.getMessage().contains("CreateEntity"));
        assertTrue(e.getMessage().contains("partitionKey='" + failedPartitionKey));
        assertTrue(e.getMessage().contains("rowKey='" + failedRowKey));
        return;
    }
    fail();
}
// A SAS built with only the required values (expiry + permissions) plus protocol/version
// should serialize those fields, in order, ending with the signature parameter.
@Test
public void generateSasTokenWithMinimumParameters() {
    final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasPermission permissions = TableSasPermission.parse("r");
    final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
    final TableSasSignatureValues sasSignatureValues =
        new TableSasSignatureValues(expiryTime, permissions)
            .setProtocol(protocol)
            .setVersion(TableServiceVersion.V2019_02_02.getVersion());
    final String sas = tableClient.generateSas(sasSignatureValues);
    // Only the prefix is checked; the trailing signature value varies per account key.
    assertTrue(
        sas.startsWith(
            "sv=2019-02-02"
                + "&se=2021-12-12T00%3A00%3A00Z"
                + "&tn=" + tableClient.getTableName()
                + "&sp=r"
                + "&spr=https"
                + "&sig="
        )
    );
}
// A SAS built with every optional value set (start time, IP range, partition/row key range)
// should serialize all of them in the documented query-parameter order.
@Test
public void generateSasTokenWithAllParameters() {
    final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasPermission permissions = TableSasPermission.parse("raud");
    final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
    final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
    final String startPartitionKey = "startPartitionKey";
    final String startRowKey = "startRowKey";
    final String endPartitionKey = "endPartitionKey";
    final String endRowKey = "endRowKey";
    final TableSasSignatureValues sasSignatureValues =
        new TableSasSignatureValues(expiryTime, permissions)
            .setProtocol(protocol)
            .setVersion(TableServiceVersion.V2019_02_02.getVersion())
            .setStartTime(startTime)
            .setSasIpRange(ipRange)
            .setStartPartitionKey(startPartitionKey)
            .setStartRowKey(startRowKey)
            .setEndPartitionKey(endPartitionKey)
            .setEndRowKey(endRowKey);
    final String sas = tableClient.generateSas(sasSignatureValues);
    // Only the prefix is checked; the trailing signature value varies per account key.
    assertTrue(
        sas.startsWith(
            "sv=2019-02-02"
                + "&st=2015-01-01T00%3A00%3A00Z"
                + "&se=2021-12-12T00%3A00%3A00Z"
                + "&tn=" + tableClient.getTableName()
                + "&sp=raud"
                + "&spk=startPartitionKey"
                + "&srk=startRowKey"
                + "&epk=endPartitionKey"
                + "&erk=endRowKey"
                + "&sip=a-b"
                + "&spr=https%2Chttp"
                + "&sig="
        )
    );
}
// Sets a single signed identifier and verifies the policy round-trips through getAccessPolicies().
// NOTE: this method previously carried two @Test annotations; JUnit 5's @Test is not @Repeatable,
// so the duplicate (a compile error) has been removed.
@Test
public void setAndListAccessPolicies() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and listing access policies is not supported on Cosmos endpoints.");

    OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    String permissions = "r";
    TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
        .setStartsOn(startTime)
        .setExpiresOn(expiryTime)
        .setPermissions(permissions);
    String id = "testPolicy";
    TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);
    final int expectedStatusCode = 204;

    // Setting policies returns 204 No Content on success.
    assertEquals(expectedStatusCode,
        tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier), null, null)
            .getStatusCode());

    // Round-trip: the policy read back must match exactly what was set.
    TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();

    assertNotNull(tableAccessPolicies);
    assertNotNull(tableAccessPolicies.getIdentifiers());

    TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);

    assertNotNull(signedIdentifier);

    TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();

    assertNotNull(accessPolicy);
    assertEquals(startTime, accessPolicy.getStartsOn());
    assertEquals(expiryTime, accessPolicy.getExpiresOn());
    assertEquals(permissions, accessPolicy.getPermissions());
    assertEquals(id, signedIdentifier.getId());
}
// Sets two signed identifiers that share one access policy and verifies both round-trip, with
// their ids returned in insertion order.
@Test
public void setAndListMultipleAccessPolicies() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and listing access policies is not supported on Cosmos endpoints.");
    OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    String permissions = "r";
    TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
        .setStartsOn(startTime)
        .setExpiresOn(expiryTime)
        .setPermissions(permissions);
    String id1 = "testPolicy1";
    String id2 = "testPolicy2";
    List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
    tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
    tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
    final int expectedStatusCode = 204;
    assertEquals(expectedStatusCode,
        tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null).getStatusCode());
    TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();
    assertNotNull(tableAccessPolicies);
    assertNotNull(tableAccessPolicies.getIdentifiers());
    assertEquals(2, tableAccessPolicies.getIdentifiers().size());
    assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
    assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
    // Every returned identifier must carry the shared policy unchanged.
    for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
        assertNotNull(signedIdentifier);
        TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
        assertNotNull(accessPolicy);
        assertEquals(startTime, accessPolicy.getStartsOn());
        assertEquals(expiryTime, accessPolicy.getExpiresOn());
        assertEquals(permissions, accessPolicy.getPermissions());
    }
}
@Test
public void allowsCreationOfEntityWithEmptyStringPrimaryKey() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");

    // Creating an entity whose partition and row keys are both "" must not throw on Storage.
    Assertions.assertDoesNotThrow(() -> tableClient.createEntity(new TableEntity("", "")));
}
// Creates an entity with empty partition and row keys, then lists it back with an explicit filter
// on the empty keys and checks exactly one entity (with the expected property) is returned.
@Test
public void allowListEntitiesWithEmptyPrimaryKey() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");

    TableEntity entity = new TableEntity("", "");
    String entityName = testResourceNamer.randomName("name", 10);
    entity.addProperty("Name", entityName);

    tableClient.createEntity(entity);

    // Filter explicitly on the empty keys so only the entity created above is returned.
    ListEntitiesOptions options = new ListEntitiesOptions();
    options.setFilter("PartitionKey eq '' and RowKey eq ''");

    PagedIterable<TableEntity> response = tableClient.listEntities(options, Duration.ofSeconds(10), null);

    // Diamond operator + forEach instead of an explicit type argument and a manual copy loop.
    ArrayList<TableEntity> responseArray = new ArrayList<>();
    response.forEach(responseArray::add);

    assertEquals(1, responseArray.size());
    assertEquals(entityName, responseArray.get(0).getProperty("Name"));
}
@Test
public void allowDeleteEntityWithEmptyPrimaryKey() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");

    // Create, then delete, an entity whose partition and row keys are both empty strings.
    TableEntity emptyKeyedEntity = new TableEntity("", "");
    String entityName = testResourceNamer.randomName("name", 10);
    emptyKeyedEntity.addProperty("Name", entityName);

    tableClient.createEntity(emptyKeyedEntity);
    tableClient.deleteEntity(emptyKeyedEntity);
}
}

public class TableClientTest extends TableClientTestBase {
private TableClient tableClient;
// Wraps the pipeline's HttpClient so the test fails if any request is made asynchronously
// (this class tests the sync client). Implements the hook declared by the test base class,
// hence @Override (added — it was missing).
@Override
protected HttpClient buildAssertingClient(HttpClient httpClient) {
    return new AssertingHttpClientBuilder(httpClient)
        .skipRequest((ignored1, ignored2) -> false)
        .assertSync()
        .build();
}
// Per-test setup hook from the test base class (@Override added — it was missing): each test
// runs against its own randomly-named, freshly-created table.
@Override
protected void beforeTest() {
    final String tableName = testResourceNamer.randomName("tableName", 20);
    final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());

    tableClient = getClientBuilder(tableName, connectionString).buildClient();

    tableClient.createTable();
}
// Per-test teardown hook (@Override added — it was missing): removes the table created in
// beforeTest() so runs do not leak tables.
@Override
protected void afterTest() {
    tableClient.deleteTable();
}
@Test
public void createTable() {
    // A second client pointed at a fresh table name should be able to create that table.
    String secondTableName = testResourceNamer.randomName("tableName", 20);
    String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    TableClient secondClient = getClientBuilder(secondTableName, connectionString).buildClient();

    assertNotNull(secondClient.createTable());
}
/**
 * Tests that a table and entity can be created while having a different tenant ID than the one that will be
 * provided in the authentication challenge.
 */
@Test
public void createTableWithMultipleTenants() {
    // Only meaningful against real Storage endpoints on service version 2020-12-06.
    Assumptions.assumeTrue(tableClient.getTableEndpoint().contains("core.windows.net")
        && tableClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
    final String tableName2 = testResourceNamer.randomName("tableName", 20);
    TokenCredential credential = null;
    if (interceptorManager.isPlaybackMode()) {
        credential = new MockTokenCredential();
    } else {
        // A random tenant ID combined with additionallyAllowedTenants("*") forces the credential
        // through the tenant-switching authentication-challenge path.
        credential = new ClientSecretCredentialBuilder()
            .clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
            .clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
            .tenantId(testResourceNamer.randomUuid())
            .additionallyAllowedTenants("*")
            .build();
    }
    // NOTE(review): the string literal below is truncated ("https:"); the default endpoint URL and
    // the remainder of this statement appear to have been lost (likely a comment-stripping
    // artifact on "//"). Restore this statement from version control before relying on this file.
    final TableClient tableClient2 =
        getClientBuilder(tableName2, Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
            "https:
    assertNotNull(tableClient2.createTable());
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    assertDoesNotThrow(() -> tableClient2.createEntity(tableEntity));
}
@Test
public void createTableWithResponse() {
    // createTableWithResponse surfaces the service's raw 204 No Content status.
    String secondTableName = testResourceNamer.randomName("tableName", 20);
    String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
    TableClient secondClient = getClientBuilder(secondTableName, connectionString).buildClient();

    int statusCode = secondClient.createTableWithResponse(null, null).getStatusCode();

    assertEquals(204, statusCode);
}
// Entity-creation happy path with plain keys; shared body in createEntityImpl.
@Test
public void createEntity() {
    createEntityImpl("partitionKey", "rowKey");
}
// Single quotes in either key must be escaped correctly on the wire.
@Test
public void createEntityWithSingleQuotesInPartitionKey() {
    createEntityImpl("partition'Key", "rowKey");
}
@Test
public void createEntityWithSingleQuotesInRowKey() {
    createEntityImpl("partitionKey", "row'Key");
}
/**
 * Shared body for the createEntity tests: creates an entity whose keys start with the given
 * prefixes and asserts the call does not throw.
 */
private void createEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    String partitionKey = testResourceNamer.randomName(partitionKeyPrefix, 20);
    String rowKey = testResourceNamer.randomName(rowKeyPrefix, 20);

    assertDoesNotThrow(() -> tableClient.createEntity(new TableEntity(partitionKey, rowKey)));
}
@Test
public void createEntityWithResponse() {
    // createEntityWithResponse should surface the service's 204 No Content status.
    TableEntity entity = new TableEntity(
        testResourceNamer.randomName("partitionKey", 20),
        testResourceNamer.randomName("rowKey", 20));

    assertEquals(204, tableClient.createEntityWithResponse(entity, null, null).getStatusCode());
}
// Round-trips one property of every supported entity data type and checks the Java type each
// value deserializes back to.
@Test
public void createEntityWithAllSupportedDataTypes() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    final boolean booleanValue = true;
    final byte[] binaryValue = "Test value".getBytes();
    final Date dateValue = new Date();
    final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
    final double doubleValue = 2.0d;
    final UUID guidValue = UUID.randomUUID();
    final int int32Value = 1337;
    final long int64Value = 1337L;
    final String stringValue = "This is table entity";
    tableEntity.addProperty("BinaryTypeProperty", binaryValue);
    tableEntity.addProperty("BooleanTypeProperty", booleanValue);
    tableEntity.addProperty("DateTypeProperty", dateValue);
    tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
    tableEntity.addProperty("DoubleTypeProperty", doubleValue);
    tableEntity.addProperty("GuidTypeProperty", guidValue);
    tableEntity.addProperty("Int32TypeProperty", int32Value);
    tableEntity.addProperty("Int64TypeProperty", int64Value);
    tableEntity.addProperty("StringTypeProperty", stringValue);
    tableClient.createEntity(tableEntity);
    final Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
    final TableEntity entity = response.getValue();
    final Map<String, Object> properties = entity.getProperties();
    assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
    assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
    // Note: a legacy java.util.Date property comes back as OffsetDateTime, per this assertion.
    assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
    assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
    assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
    assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
    assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
    assertTrue(properties.get("Int64TypeProperty") instanceof Long);
    assertTrue(properties.get("StringTypeProperty") instanceof String);
}
/*@Test
public void createEntitySubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
}*/
// Deleting the table created in beforeTest() must not throw.
@Test
public void deleteTable() {
    assertDoesNotThrow(() -> tableClient.deleteTable());
}
// deleteTable() on a table that no longer exists must not throw either.
@Test
public void deleteNonExistingTable() {
    tableClient.deleteTable();
    assertDoesNotThrow(() -> tableClient.deleteTable());
}
@Test
public void deleteTableWithResponse() {
    // Successful table deletion returns 204 No Content.
    int statusCode = tableClient.deleteTableWithResponse(null, null).getStatusCode();

    assertEquals(204, statusCode);
}
// Unlike deleteTable(), the WithResponse overload surfaces the second call's raw 404.
@Test
public void deleteNonExistingTableWithResponse() {
    final int expectedStatusCode = 404;
    tableClient.deleteTableWithResponse(null, null);
    assertEquals(expectedStatusCode, tableClient.deleteTableWithResponse(null, null).getStatusCode());
}
// Entity-deletion happy path plus key-escaping variants; shared body in deleteEntityImpl.
@Test
public void deleteEntity() {
    deleteEntityImpl("partitionKey", "rowKey");
}
@Test
public void deleteEntityWithSingleQuotesInPartitionKey() {
    deleteEntityImpl("partition'Key", "rowKey");
}
@Test
public void deleteEntityWithSingleQuotesInRowKey() {
    deleteEntityImpl("partitionKey", "row'Key");
}
/**
 * Shared body for the deleteEntity tests: creates an entity, verifies it exists (with an ETag),
 * then deletes it by partition/row key and asserts the delete does not throw.
 */
private void deleteEntityImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    String partitionKey = testResourceNamer.randomName(partitionKeyPrefix, 20);
    String rowKey = testResourceNamer.randomName(rowKeyPrefix, 20);

    tableClient.createEntity(new TableEntity(partitionKey, rowKey));

    TableEntity createdEntity = tableClient.getEntity(partitionKey, rowKey);

    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");

    assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKey, rowKey));
}
@Test
public void deleteNonExistingEntity() {
    // Deleting an entity that was never created must not throw.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String rowKey = testResourceNamer.randomName("rowKey", 20);

    assertDoesNotThrow(() -> tableClient.deleteEntity(partitionKey, rowKey));
}
@Test
public void deleteEntityWithResponse() {
    // Create an entity, confirm it exists, then delete it with the ETag-match flag off
    // (contrast with deleteEntityWithResponseMatchETag) and expect 204 No Content.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String rowKey = testResourceNamer.randomName("rowKey", 20);

    tableClient.createEntity(new TableEntity(partitionKey, rowKey));

    TableEntity createdEntity = tableClient.getEntity(partitionKey, rowKey);

    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");

    assertEquals(204,
        tableClient.deleteEntityWithResponse(createdEntity, false, null, null).getStatusCode());
}
// Deleting a never-created entity via the WithResponse overload surfaces the service's 404.
@Test
public void deleteNonExistingEntityWithResponse() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 404;
    assertEquals(expectedStatusCode,
        tableClient.deleteEntityWithResponse(entity, false, null, null).getStatusCode());
}
// Same as deleteEntityWithResponse but passes true for the ETag-match flag; the freshly fetched
// entity's ETag is current, so the conditional delete still returns 204.
@Test
public void deleteEntityWithResponseMatchETag() {
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
    final int expectedStatusCode = 204;
    tableClient.createEntity(tableEntity);
    final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
    assertEquals(expectedStatusCode,
        tableClient.deleteEntityWithResponse(createdEntity, true, null, null).getStatusCode());
}
// getEntity round-trip tests, including single-quote escaping in either key; shared body in
// getEntityWithResponseImpl.
@Test
public void getEntityWithSingleQuotesInPartitionKey() {
    getEntityWithResponseImpl(tableClient, testResourceNamer, "partition'Key", "rowKey");
}
@Test
public void getEntityWithSingleQuotesInRowKey() {
    getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "row'Key");
}
@Test
public void getEntityWithResponse() {
    getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
/**
 * Shared body for the getEntity tests: creates an entity, fetches it via getEntityWithResponse,
 * then verifies the 200 status plus the round-tripped keys and the system properties.
 */
static void getEntityWithResponseImpl(TableClient tableClient, TestResourceNamer testResourceNamer,
    String partitionKeyPrefix, String rowKeyPrefix) {
    String partitionKey = testResourceNamer.randomName(partitionKeyPrefix, 20);
    String rowKey = testResourceNamer.randomName(rowKeyPrefix, 20);
    TableEntity created = new TableEntity(partitionKey, rowKey);

    tableClient.createEntity(created);

    Response<TableEntity> response = tableClient.getEntityWithResponse(partitionKey, rowKey, null, null, null);
    TableEntity fetched = response.getValue();

    assertEquals(200, response.getStatusCode());
    assertNotNull(fetched);
    assertEquals(created.getPartitionKey(), fetched.getPartitionKey());
    assertEquals(created.getRowKey(), fetched.getRowKey());
    assertNotNull(fetched.getTimestamp());
    assertNotNull(fetched.getETag());
    assertNotNull(fetched.getProperties());
}
@Test
public void getEntityWithResponseWithSelect() {
    // With a $select projection only the selected property comes back; the partition key,
    // row key and timestamp are not materialized on the returned entity.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String rowKey = testResourceNamer.randomName("rowKey", 20);
    TableEntity created = new TableEntity(partitionKey, rowKey);
    created.addProperty("Test", "Value");

    tableClient.createEntity(created);

    List<String> selection = new ArrayList<>();
    selection.add("Test");

    Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKey, rowKey, selection, null, null);
    TableEntity fetched = response.getValue();

    assertEquals(200, response.getStatusCode());
    assertNotNull(fetched);
    assertNull(fetched.getPartitionKey());
    assertNull(fetched.getRowKey());
    assertNull(fetched.getTimestamp());
    assertNotNull(fetched.getETag());
    assertEquals("Value", fetched.getProperties().get("Test"));
}
// Single-quote-escaping variants of the update tests; shared body in updateEntityWithResponseImpl.
// Fix: pass the raw prefixes — updateEntityWithResponseImpl applies testResourceNamer.randomName
// itself, so the previous code applied randomName twice (and was inconsistent with the
// updateEntityWithResponseReplace/Merge tests, which pass raw prefixes).
@Test
public void updateEntityWithSingleQuotesInPartitionKey() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.MERGE, "partition'Key", "rowKey");
}

@Test
public void updateEntityWithSingleQuotesInRowKey() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.MERGE, "partitionKey", "row'Key");
}
/*@Test
public void getEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity);
final Response<TableEntity> response =
tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class, null, null);
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
}*/
// Mode-specific update tests; the shared impl asserts that REPLACE drops properties missing from
// the update payload while MERGE keeps them.
@Test
public void updateEntityWithResponseReplace() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.REPLACE, "partitionKey", "rowKey");
}
@Test
public void updateEntityWithResponseMerge() {
    updateEntityWithResponseImpl(TableEntityUpdateMode.MERGE, "partitionKey", "rowKey");
}
/**
 * Shared body for the update tests.
 *
 * <p>In the case of {@link TableEntityUpdateMode#MERGE} the property removed from the update
 * payload is expected to survive the update; in the case of {@link TableEntityUpdateMode#REPLACE}
 * it is expected to be gone. (The original Javadoc here was truncated/duplicated; reconstructed
 * from the assertions below.)</p>
 */
void updateEntityWithResponseImpl(TableEntityUpdateMode mode, String partitionKeyPrefix, String rowKeyPrefix) {
    // MERGE leaves properties absent from the payload intact; REPLACE removes them.
    final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
    final String partitionKeyValue = testResourceNamer.randomName(partitionKeyPrefix, 20);
    final String rowKeyValue = testResourceNamer.randomName(rowKeyPrefix, 20);
    final int expectedStatusCode = 204;
    final String oldPropertyKey = "propertyA";
    final String newPropertyKey = "propertyB";
    final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
        .addProperty(oldPropertyKey, "valueA");
    tableClient.createEntity(tableEntity);
    final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    assertNotNull(createdEntity, "'createdEntity' should not be null.");
    assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
    // Swap propertyA for propertyB in the update payload, then update with the given mode.
    createdEntity.getProperties().remove(oldPropertyKey);
    createdEntity.addProperty(newPropertyKey, "valueB");
    assertEquals(expectedStatusCode,
        tableClient.updateEntityWithResponse(createdEntity, mode, true, null, null).getStatusCode());
    TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
    final Map<String, Object> properties = entity.getProperties();
    assertTrue(properties.containsKey(newPropertyKey));
    assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
}
/*@Test
public void updateEntityWithResponseSubclass() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity);
tableEntity.setSubclassProperty("UpdatedValue");
assertEquals(expectedStatusCode,
tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true, null, null)
.getStatusCode()));
TableEntity entity = tableClient.getEntity(partitionKeyValue, rowKeyValue);
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
}*/
// Listing happy path plus key-escaping variants; shared body in listEntitiesImpl.
@Test
public void listEntities() {
    listEntitiesImpl("partitionKey", "rowKey");
}
@Test
public void listEntitiesWithSingleQuotesInPartitionKey() {
    listEntitiesImpl("partition'Key", "rowKey");
}
@Test
public void listEntitiesWithSingleQuotesInRowKey() {
    listEntitiesImpl("partitionKey", "row'Key");
}
/**
 * Shared body for the listEntities tests: inserts two entities into one partition and asserts the
 * first page of an unfiltered listing contains both.
 */
private void listEntitiesImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    String partitionKey = testResourceNamer.randomName(partitionKeyPrefix, 20);
    String firstRowKey = testResourceNamer.randomName(rowKeyPrefix, 20);
    String secondRowKey = testResourceNamer.randomName(rowKeyPrefix, 20);

    tableClient.createEntity(new TableEntity(partitionKey, firstRowKey));
    tableClient.createEntity(new TableEntity(partitionKey, secondRowKey));

    Iterator<PagedResponse<TableEntity>> pages = tableClient.listEntities().iterableByPage().iterator();

    assertTrue(pages.hasNext());
    assertEquals(2, pages.next().getValue().size());
}
@Test
public void listEntitiesWithFilter() {
    // Only the entity whose RowKey matches the OData filter should be returned.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String matchingRowKey = testResourceNamer.randomName("rowKey", 20);
    String otherRowKey = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + matchingRowKey + "'");

    tableClient.createEntity(new TableEntity(partitionKey, matchingRowKey));
    tableClient.createEntity(new TableEntity(partitionKey, otherRowKey));

    for (TableEntity tableEntity : tableClient.listEntities(options, null, null)) {
        assertEquals(partitionKey, tableEntity.getPartitionKey());
        assertEquals(matchingRowKey, tableEntity.getRowKey());
    }
}
@Test
public void listEntitiesWithSelect() {
    // With a $select projection only "propertyC" is returned; the keys and propertyD are absent.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String rowKey = testResourceNamer.randomName("rowKey", 20);
    TableEntity entity = new TableEntity(partitionKey, rowKey)
        .addProperty("propertyC", "valueC")
        .addProperty("propertyD", "valueD");

    List<String> selection = new ArrayList<>();
    selection.add("propertyC");

    tableClient.createEntity(entity);

    Iterator<PagedResponse<TableEntity>> pages =
        tableClient.listEntities(new ListEntitiesOptions().setSelect(selection), null, null)
            .iterableByPage().iterator();

    assertTrue(pages.hasNext());

    TableEntity retrieved = pages.next().getValue().get(0);

    assertNull(retrieved.getPartitionKey());
    assertNull(retrieved.getRowKey());
    assertEquals("valueC", retrieved.getProperties().get("propertyC"));
    assertNull(retrieved.getProperties().get("propertyD"));
}
@Test
public void listEntitiesWithTop() {
    // With top = 2 the first page holds exactly two of the three inserted entities.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String firstRowKey = testResourceNamer.randomName("rowKey", 20);
    String secondRowKey = testResourceNamer.randomName("rowKey", 20);
    String thirdRowKey = testResourceNamer.randomName("rowKey", 20);

    tableClient.createEntity(new TableEntity(partitionKey, firstRowKey));
    tableClient.createEntity(new TableEntity(partitionKey, secondRowKey));
    tableClient.createEntity(new TableEntity(partitionKey, thirdRowKey));

    Iterator<PagedResponse<TableEntity>> pages =
        tableClient.listEntities(new ListEntitiesOptions().setTop(2), null, null).iterableByPage().iterator();

    assertTrue(pages.hasNext());
    assertEquals(2, pages.next().getValue().size());
}
/*@Test
public void listEntitiesSubclass() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue));
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2));
Iterator<PagedResponse<TableEntity>> iterator =
tableClient.listEntities(SampleEntity.class).iterableByPage().iterator();
assertTrue(iterator.hasNext());
List<TableEntity> retrievedEntities = iterator.next().getValue();
TableEntity retrievedEntity = retrievedEntities.get(0);
TableEntity retrievedEntity2 = retrievedEntities.get(1);
assertEquals(partitionKeyValue, retrievedEntity.getPartitionKey());
assertEquals(rowKeyValue, retrievedEntity.getRowKey());
assertEquals(partitionKeyValue, retrievedEntity2.getPartitionKey());
assertEquals(rowKeyValue2, retrievedEntity2.getRowKey());
}*/
// Submits a transactional batch of two CREATEs and verifies the batch status (202), each
// per-operation status (204), and that the first entity is actually readable afterwards.
@Test
public void submitTransaction() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    int expectedBatchStatusCode = 202;
    int expectedOperationStatusCode = 204;
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
    // The batch as a whole is Accepted (202); each operation reports its own status code.
    final Response<TableTransactionResult> result =
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    assertNotNull(result);
    assertEquals(expectedBatchStatusCode, result.getStatusCode());
    assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
    assertEquals(expectedOperationStatusCode,
        result.getValue().getTransactionActionResponses().get(0).getStatusCode());
    assertEquals(expectedOperationStatusCode,
        result.getValue().getTransactionActionResponses().get(1).getStatusCode());
    // Confirm the first created entity is retrievable outside the transaction.
    final Response<TableEntity> response =
        tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, null, null);
    final TableEntity entity = response.getValue();
    assertNotNull(entity);
    assertEquals(partitionKeyValue, entity.getPartitionKey());
    assertEquals(rowKeyValue, entity.getRowKey());
    assertNotNull(entity.getTimestamp());
    assertNotNull(entity.getETag());
    assertNotNull(entity.getProperties());
}
// All-actions transaction happy path; shared body in submitTransactionAllActionsImpl.
// Fix: the impl was previously wrapped in a Runnable that was immediately run — a pointless
// indirection. Call it directly, matching the other submitTransactionAllActions* tests.
@Test
public void submitTransactionAllActions() {
    submitTransactionAllActionsImpl("partitionKey", "rowKey");
}
// Key-escaping variants of the all-actions transaction test; shared body in
// submitTransactionAllActionsImpl.
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInPartitionKey() {
    submitTransactionAllActionsImpl("partition'Key", "rowKey");
}
@Test
public void submitTransactionAllActionsForEntitiesWithSingleQuotesInRowKey() {
    submitTransactionAllActionsImpl("partitionKey", "row'Key");
}
private void submitTransactionAllActionsImpl(String partitionKeyPrefix, String rowKeyPrefix) {
    // One shared partition key plus a distinct row key per transaction action type.
    String pk = testResourceNamer.randomName(partitionKeyPrefix, 20);
    String rkCreate = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rkUpsertInsert = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rkUpsertMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rkUpsertReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rkUpdateMerge = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rkUpdateReplace = testResourceNamer.randomName(rowKeyPrefix, 20);
    String rkDelete = testResourceNamer.randomName(rowKeyPrefix, 20);

    // Rows that the merge/replace/delete actions operate on must already exist.
    tableClient.createEntity(new TableEntity(pk, rkUpsertMerge));
    tableClient.createEntity(new TableEntity(pk, rkUpsertReplace));
    tableClient.createEntity(new TableEntity(pk, rkUpdateMerge));
    tableClient.createEntity(new TableEntity(pk, rkUpdateReplace));
    tableClient.createEntity(new TableEntity(pk, rkDelete));

    TableEntity upsertMergeEntity = new TableEntity(pk, rkUpsertMerge);
    upsertMergeEntity.addProperty("Test", "MergedValue");
    TableEntity upsertReplaceEntity = new TableEntity(pk, rkUpsertReplace);
    upsertReplaceEntity.addProperty("Test", "ReplacedValue");
    TableEntity updateMergeEntity = new TableEntity(pk, rkUpdateMerge);
    updateMergeEntity.addProperty("Test", "MergedValue");
    TableEntity updateReplaceEntity = new TableEntity(pk, rkUpdateReplace);
    updateReplaceEntity.addProperty("Test", "MergedValue");

    // Assemble one batch exercising every TableTransactionActionType.
    List<TableTransactionAction> batch = new ArrayList<>();
    batch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
        new TableEntity(pk, rkCreate)));
    batch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
        new TableEntity(pk, rkUpsertInsert)));
    batch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, upsertMergeEntity));
    batch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, upsertReplaceEntity));
    batch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, updateMergeEntity));
    batch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, updateReplaceEntity));
    batch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
        new TableEntity(pk, rkDelete)));

    // The batch itself answers 202; every individual sub-operation answers 204.
    final Response<TableTransactionResult> response =
        tableClient.submitTransactionWithResponse(batch, null, null);
    assertNotNull(response);
    assertEquals(202, response.getStatusCode());
    TableTransactionResult result = response.getValue();
    assertEquals(batch.size(), result.getTransactionActionResponses().size());
    for (TableTransactionActionResponse actionResponse : result.getTransactionActionResponses()) {
        assertEquals(204, actionResponse.getStatusCode());
    }
}
@Test
public void submitTransactionWithFailingAction() {
    // A DELETE aimed at a row that was never created must fail the whole batch.
    String partitionKey = testResourceNamer.randomName("partitionKey", 20);
    String existingRowKey = testResourceNamer.randomName("rowKey", 20);
    String missingRowKey = testResourceNamer.randomName("rowKey", 20);

    List<TableTransactionAction> actions = new ArrayList<>();
    actions.add(new TableTransactionAction(TableTransactionActionType.CREATE,
        new TableEntity(partitionKey, existingRowKey)));
    actions.add(new TableTransactionAction(TableTransactionActionType.DELETE,
        new TableEntity(partitionKey, missingRowKey)));

    try {
        tableClient.submitTransactionWithResponse(actions, null, null);
    } catch (TableTransactionFailedException ex) {
        // The exception message must pinpoint the failing operation and its keys.
        String message = ex.getMessage();
        assertTrue(message.contains("An action within the operation failed"));
        assertTrue(message.contains("The failed operation was"));
        assertTrue(message.contains("DeleteEntity"));
        assertTrue(message.contains("partitionKey='" + partitionKey));
        assertTrue(message.contains("rowKey='" + missingRowKey));
        return;
    }
    fail();
}
@Test
// Two CREATE actions targeting the identical partition/row key pair in one
// transaction must fail. Storage reports a failed transaction action; Cosmos
// rejects the batch with a 400 carrying "InvalidDuplicateRow".
public void submitTransactionWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
try {
tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
} catch (TableTransactionFailedException e) {
// Storage path: the message names the failing operation and its keys.
assertTrue(e.getMessage().contains("An action within the operation failed"));
assertTrue(e.getMessage().contains("The failed operation was"));
assertTrue(e.getMessage().contains("CreateEntity"));
assertTrue(e.getMessage().contains("partitionKey='" + partitionKeyValue));
assertTrue(e.getMessage().contains("rowKey='" + rowKeyValue));
return;
} catch (TableServiceException e) {
// Cosmos path: service-level 400 with the InvalidDuplicateRow error code.
assertTrue(IS_COSMOS_TEST);
assertEquals(400, e.getResponse().getStatusCode());
assertTrue(e.getMessage().contains("InvalidDuplicateRow"));
return;
}
// Neither exception was thrown: the duplicate batch was wrongly accepted.
fail();
}
@Test
// Mixing two different partition keys inside a single transaction is rejected.
// The original if/else duplicated five assertions that differed only in which
// key pair they referenced; the expected keys are now selected once up front.
public void submitTransactionWithDifferentPartitionKeys() {
    String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
    String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    List<TableTransactionAction> transactionalBatch = new ArrayList<>();
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
    transactionalBatch.add(new TableTransactionAction(
        TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
    try {
        tableClient.submitTransactionWithResponse(transactionalBatch, null, null);
    } catch (TableTransactionFailedException e) {
        // Cosmos blames the first action's keys, Storage the second's; everything
        // else about the failure message is identical.
        String expectedPartitionKey = IS_COSMOS_TEST ? partitionKeyValue : partitionKeyValue2;
        String expectedRowKey = IS_COSMOS_TEST ? rowKeyValue : rowKeyValue2;
        assertTrue(e.getMessage().contains("An action within the operation failed"));
        assertTrue(e.getMessage().contains("The failed operation was"));
        assertTrue(e.getMessage().contains("CreateEntity"));
        assertTrue(e.getMessage().contains("partitionKey='" + expectedPartitionKey));
        assertTrue(e.getMessage().contains("rowKey='" + expectedRowKey));
        return;
    }
    fail();
}
@Test
public void generateSasTokenWithMinimumParameters() {
    // Read-only table SAS restricted to HTTPS, pinned to an explicit service version.
    OffsetDateTime expiry = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    TableSasPermission permission = TableSasPermission.parse("r");
    TableSasSignatureValues signatureValues = new TableSasSignatureValues(expiry, permission)
        .setProtocol(TableSasProtocol.HTTPS_ONLY)
        .setVersion(TableServiceVersion.V2019_02_02.getVersion());

    String sas = tableClient.generateSas(signatureValues);

    // Everything ahead of the signature is deterministic and checked verbatim;
    // the sig value itself depends on the account key, so only the prefix matters.
    String expectedPrefix = "sv=2019-02-02"
        + "&se=2021-12-12T00%3A00%3A00Z"
        + "&tn=" + tableClient.getTableName()
        + "&sp=r"
        + "&spr=https"
        + "&sig=";
    assertTrue(sas.startsWith(expectedPrefix));
}
@Test
// Exercises every optional SAS parameter — start time, IP range, and the
// partition/row key range bounds — alongside the required expiry and permissions.
public void generateSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("raud");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final String startPartitionKey = "startPartitionKey";
final String startRowKey = "startRowKey";
final String endPartitionKey = "endPartitionKey";
final String endRowKey = "endRowKey";
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange)
.setStartPartitionKey(startPartitionKey)
.setStartRowKey(startRowKey)
.setEndPartitionKey(endPartitionKey)
.setEndRowKey(endRowKey);
final String sas = tableClient.generateSas(sasSignatureValues);
// Every component before &sig= is deterministic, so the prefix is compared
// verbatim; the signature itself depends on the account key.
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=raud"
+ "&spk=startPartitionKey"
+ "&srk=startRowKey"
+ "&epk=endPartitionKey"
+ "&erk=endRowKey"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
// NOTE(review): the original carried a duplicated @Test annotation here; @Test is
// not a repeatable annotation, so the duplicate is a compile error and was removed.
public void setAndListAccessPolicies() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and listing access policies is not supported on Cosmos endpoints.");

    // One signed identifier wrapping a read-only policy valid for one year.
    OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    String permissions = "r";
    TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
        .setStartsOn(startTime)
        .setExpiresOn(expiryTime)
        .setPermissions(permissions);
    String id = "testPolicy";
    TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);

    // Setting the policy answers 204 No Content.
    final int expectedStatusCode = 204;
    assertEquals(expectedStatusCode,
        tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier), null, null)
            .getStatusCode());

    // Reading the policies back must round-trip every field that was set.
    TableAccessPolicies tableAccessPolicies = tableClient.getAccessPolicies();
    assertNotNull(tableAccessPolicies);
    assertNotNull(tableAccessPolicies.getIdentifiers());
    TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
    assertNotNull(signedIdentifier);
    TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
    assertNotNull(accessPolicy);
    assertEquals(startTime, accessPolicy.getStartsOn());
    assertEquals(expiryTime, accessPolicy.getExpiresOn());
    assertEquals(permissions, accessPolicy.getPermissions());
    assertEquals(id, signedIdentifier.getId());
}
@Test
public void setAndListMultipleAccessPolicies() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and listing access policies is not supported on Cosmos endpoints.");

    // Two signed identifiers sharing a single read-only access policy.
    OffsetDateTime policyStart = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    OffsetDateTime policyExpiry = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    String policyPermissions = "r";
    TableAccessPolicy sharedPolicy = new TableAccessPolicy()
        .setStartsOn(policyStart)
        .setExpiresOn(policyExpiry)
        .setPermissions(policyPermissions);

    String firstId = "testPolicy1";
    String secondId = "testPolicy2";
    List<TableSignedIdentifier> identifiers = new ArrayList<>();
    identifiers.add(new TableSignedIdentifier(firstId).setAccessPolicy(sharedPolicy));
    identifiers.add(new TableSignedIdentifier(secondId).setAccessPolicy(sharedPolicy));

    // Setting the policies answers 204 No Content.
    assertEquals(204,
        tableClient.setAccessPoliciesWithResponse(identifiers, null, null).getStatusCode());

    // Both identifiers come back, in order, each carrying the shared policy intact.
    TableAccessPolicies retrieved = tableClient.getAccessPolicies();
    assertNotNull(retrieved);
    List<TableSignedIdentifier> retrievedIdentifiers = retrieved.getIdentifiers();
    assertNotNull(retrievedIdentifiers);
    assertEquals(2, retrievedIdentifiers.size());
    assertEquals(firstId, retrievedIdentifiers.get(0).getId());
    assertEquals(secondId, retrievedIdentifiers.get(1).getId());
    for (TableSignedIdentifier identifier : retrievedIdentifiers) {
        assertNotNull(identifier);
        TableAccessPolicy retrievedPolicy = identifier.getAccessPolicy();
        assertNotNull(retrievedPolicy);
        assertEquals(policyStart, retrievedPolicy.getStartsOn());
        assertEquals(policyExpiry, retrievedPolicy.getExpiresOn());
        assertEquals(policyPermissions, retrievedPolicy.getPermissions());
    }
}
@Test
// Empty-string partition and row keys are accepted by Storage endpoints (Cosmos
// rejects them, hence the assumption); creating such an entity must not throw.
public void allowsCreationOfEntityWithEmptyStringPrimaryKey() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Empty row or partition keys are not supported on Cosmos endpoints.");
Assertions.assertDoesNotThrow(() -> {
TableEntity entity = new TableEntity("", "");
tableClient.createEntity(entity);
});
}
@Test
// An entity keyed by empty strings must be retrievable via an explicit filter
// on the empty partition and row keys.
public void allowListEntitiesWithEmptyPrimaryKey() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Empty row or partition keys are not supported on Cosmos endpoints.");

    TableEntity entity = new TableEntity("", "");
    String entityName = testResourceNamer.randomName("name", 10);
    entity.addProperty("Name", entityName);
    tableClient.createEntity(entity);

    ListEntitiesOptions options = new ListEntitiesOptions();
    options.setFilter("PartitionKey eq '' and RowKey eq ''");
    PagedIterable<TableEntity> response = tableClient.listEntities(options, Duration.ofSeconds(10), null);

    // Materialize the paged response. Diamond operator and forEach replace the
    // redundant explicit type argument and the manual accumulation loop.
    List<TableEntity> responseArray = new ArrayList<>();
    response.forEach(responseArray::add);

    // Exactly the one entity created above, with its property intact.
    assertEquals(1, responseArray.size());
    assertEquals(entityName, responseArray.get(0).getProperty("Name"));
}
@Test
// An entity keyed by empty strings must be deletable as well as creatable.
public void allowDeleteEntityWithEmptyPrimaryKey() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Empty row or partition keys are not supported on Cosmos endpoints.");
TableEntity entity = new TableEntity("", "");
String entityName = testResourceNamer.randomName("name", 10);
entity.addProperty("Name", entityName);
tableClient.createEntity(entity);
tableClient.deleteEntity(entity);
}
} |
Same comment as on the earlier SAS test: derive the expiry from `OffsetDateTime.now()` plus a fixed offset instead of hard-coding a future calendar date that will eventually pass and break the test. | public void canUseSasTokenToCreateValidTableClient() {
// Fragment of canUseSasTokenToCreateValidTableClient — the method signature sits
// on the preceding delimiter-corrupted line of this dataset row. Generates an
// account SAS with "add" permission and proves it authorizes a fresh client.
// NOTE(review): the expiry is a hard-coded future date that will eventually pass.
final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
// Build a table client that authenticates with nothing but the SAS token.
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
// The "a" (add) permission suffices to create an entity through the SAS client.
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
} | final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC); | public void canUseSasTokenToCreateValidTableClient() {
// Revised fragment of canUseSasTokenToCreateValidTableClient (this dataset row's
// "after" column): identical flow, but the SAS expiry is now derived from the
// current time plus one day instead of a hard-coded calendar date.
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
// Build a table client that authenticates with nothing but the SAS token.
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
// The "a" (add) permission suffices to create an entity through the SAS client.
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
} | class TableServiceAsyncClientTest extends TableServiceClientTestBase {
// Upper bound applied to every blocking/StepVerifier wait in this suite.
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(100);
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
// True when the suite targets a Cosmos table endpoint (several features differ).
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceAsyncClient serviceClient;
// Wraps the given client so the test harness can assert async-only usage.
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
@Override
protected void beforeTest() {
// A fresh service client per test, built from the playback-aware connection string.
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildAsyncClient();
}
@Test
public void serviceCreateTable() {
    // Creating a brand-new table must emit exactly one non-null item.
    String tableName = testResourceNamer.randomName("test", 20);
    StepVerifier.create(serviceClient.createTable(tableName))
        .assertNext(createdTable -> assertNotNull(createdTable))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
// Only meaningful against the public Azure cloud on service version 2020-12-06.
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
// A random tenant ID plus "*" allowed tenants forces the credential through
// the tenant-discovery challenge exercised by this test.
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceAsyncClient tableServiceAsyncClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
// NOTE(review): the next line is an unterminated string literal — everything
// after "https:" (the default endpoint URL, the rest of this builder call, and
// presumably the credential argument) was lost, likely to comment stripping.
// Restore it from the original source before compiling.
"https:
StepVerifier.create(tableServiceAsyncClient.createTable(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
// A second table created through the same multi-tenant client must also succeed.
tableName = testResourceNamer.randomName("tableName", 20);
StepVerifier.create(tableServiceAsyncClient.createTable(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableWithResponse() {
    // The raw response carries a 204 status plus the created table as its value.
    String tableName = testResourceNamer.randomName("test", 20);
    StepVerifier.create(serviceClient.createTableWithResponse(tableName))
        .assertNext(response -> {
            assertEquals(204, response.getStatusCode());
            assertNotNull(response.getValue());
        })
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableFailsIfExists() {
    // A second create of the same table surfaces a 409 Conflict from the service.
    String tableName = testResourceNamer.randomName("test", 20);
    serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
    StepVerifier.create(serviceClient.createTable(tableName))
        .expectErrorMatches(error -> {
            if (!(error instanceof TableServiceException)) {
                return false;
            }
            TableServiceException serviceError = (TableServiceException) error;
            return serviceError.getResponse().getStatusCode() == 409;
        })
        .verify(DEFAULT_TIMEOUT);
}
@Test
// createTableIfNotExists on a fresh name behaves like a plain create.
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
StepVerifier.create(serviceClient.createTableIfNotExists(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// Unlike createTable, this variant completes without error when the table exists.
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.createTableIfNotExists(tableName))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// Fresh table: 204 status with the created table in the response value.
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
StepVerifier.create(serviceClient.createTableIfNotExistsWithResponse(tableName))
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// Pre-existing table: the conflict is reported as a 409 status with a null
// value rather than as an error signal.
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.createTableIfNotExistsWithResponse(tableName))
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// Deleting a table that exists completes without error.
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.deleteTable(tableName))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// Deleting a table that never existed also completes without error.
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
StepVerifier.create(serviceClient.deleteTable(tableName))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// Deleting an existing table through the with-response variant answers 204.
public void serviceDeleteTableWithResponse() {
    String tableName = testResourceNamer.randomName("test", 20);
    // Bound the setup call like every other test in this class does: a bare
    // block() (as the original had) could hang the suite indefinitely if the
    // service stalls.
    serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
    StepVerifier.create(serviceClient.deleteTableWithResponse(tableName))
        .assertNext(response -> assertEquals(204, response.getStatusCode()))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
// The with-response variant exposes the 404 when the table does not exist,
// instead of completing silently like deleteTable does.
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
StepVerifier.create(serviceClient.deleteTableWithResponse(tableName))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceListTables() {
    // With two tables created, the listing must emit at least two items before
    // draining whatever else the account contains.
    String firstTable = testResourceNamer.randomName("test", 20);
    String secondTable = testResourceNamer.randomName("test", 20);
    serviceClient.createTable(firstTable).block(DEFAULT_TIMEOUT);
    serviceClient.createTable(secondTable).block(DEFAULT_TIMEOUT);
    StepVerifier.create(serviceClient.listTables())
        .expectNextCount(2)
        .thenConsumeWhile(item -> true)
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
// An OData filter on TableName narrows the listing to the single matching table.
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName2).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.listTables(options))
.assertNext(table -> assertEquals(tableName, table.getName()))
.expectNextCount(0)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// setTop(2) with three tables present: the verifier expects two emissions before
// draining the rest. NOTE(review): presumably setTop caps the page size rather
// than the total count — confirm against the ListTablesOptions contract.
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName2).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName3).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.listTables(options))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
// A TableAsyncClient obtained from the service client must operate on its table;
// the shared helper performs the entity round trip.
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
TableAsyncClient tableClient = serviceClient.getTableClient(tableName);
TableAsyncClientTest.getEntityWithResponseAsyncImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
    // Read-only account SAS: table service, object resource type, HTTPS only.
    OffsetDateTime expiry = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    TableAccountSasPermission permission = TableAccountSasPermission.parse("r");
    TableAccountSasService tableService = new TableAccountSasService().setTableAccess(true);
    TableAccountSasResourceType objectResource = new TableAccountSasResourceType().setObject(true);
    TableAccountSasSignatureValues signatureValues =
        new TableAccountSasSignatureValues(expiry, permission, tableService, objectResource)
            .setProtocol(TableSasProtocol.HTTPS_ONLY)
            .setVersion(TableServiceVersion.V2019_02_02.getVersion());

    String sas = serviceClient.generateAccountSas(signatureValues);

    // Everything ahead of the signature component is deterministic.
    String expectedPrefix = "sv=2019-02-02"
        + "&ss=t"
        + "&srt=o"
        + "&se=2021-12-12T00%3A00%3A00Z"
        + "&sp=r"
        + "&spr=https"
        + "&sig=";
    assertTrue(sas.startsWith(expectedPrefix));
}
@Test
// Exercises the optional account-SAS parameters (start time, IP range) on top
// of the required expiry, permissions, services, and resource types.
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
// Every component before &sig= is deterministic, so the prefix is compared
// verbatim; the signature itself depends on the account key.
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
// NOTE(review): the original carried a duplicated @Test annotation here; @Test is
// not a repeatable annotation, so the duplicate is a compile error and was removed.
public void setGetProperties() {
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and getting properties is not supported on Cosmos endpoints.");

    // A fully-populated service-properties payload: logging, CORS, and metrics,
    // all sharing the same 5-day retention policy.
    TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
        .setDaysToRetain(5)
        .setEnabled(true);
    TableServiceLogging logging = new TableServiceLogging()
        .setReadLogged(true)
        .setAnalyticsVersion("1.0")
        .setRetentionPolicy(retentionPolicy);
    List<TableServiceCorsRule> corsRules = new ArrayList<>();
    corsRules.add(new TableServiceCorsRule()
        .setAllowedMethods("GET,PUT,HEAD")
        .setAllowedOrigins("*")
        .setAllowedHeaders("x-ms-version")
        .setExposedHeaders("x-ms-client-request-id")
        .setMaxAgeInSeconds(10));
    TableServiceMetrics hourMetrics = new TableServiceMetrics()
        .setEnabled(true)
        .setVersion("1.0")
        .setRetentionPolicy(retentionPolicy)
        .setIncludeApis(true);
    TableServiceMetrics minuteMetrics = new TableServiceMetrics()
        .setEnabled(true)
        .setVersion("1.0")
        .setRetentionPolicy(retentionPolicy)
        .setIncludeApis(true);
    TableServiceProperties sentProperties = new TableServiceProperties()
        .setLogging(logging)
        .setCorsRules(corsRules)
        .setMinuteMetrics(minuteMetrics)
        .setHourMetrics(hourMetrics);

    StepVerifier.create(serviceClient.setPropertiesWithResponse(sentProperties))
        .assertNext(response -> {
            assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
            assertNotNull(response.getHeaders().getValue("x-ms-version"));
        })
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);

    // Property changes propagate asynchronously on the service side; wait before
    // reading them back (skipped automatically in playback mode).
    sleepIfRunningAgainstService(30000);

    StepVerifier.create(serviceClient.getProperties())
        .assertNext(retrievedProperties -> assertPropertiesEquals(sentProperties, retrievedProperties))
        .expectComplete()
        .verify(DEFAULT_TIMEOUT);
}
@Test
public void getStatistics() throws URISyntaxException {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
String[] hostParts = primaryEndpoint.getHost().split("\\.");
StringJoiner secondaryHostJoiner = new StringJoiner(".");
secondaryHostJoiner.add(hostParts[0] + "-secondary");
for (int i = 1; i < hostParts.length; i++) {
secondaryHostJoiner.add(hostParts[i]);
}
String secondaryEndpoint = primaryEndpoint.getScheme() + ":
TableServiceAsyncClient secondaryClient = new TableServiceClientBuilder()
.endpoint(secondaryEndpoint)
.serviceVersion(serviceClient.getServiceVersion())
.pipeline(serviceClient.getHttpPipeline())
.buildAsyncClient();
StepVerifier.create(secondaryClient.getStatistics())
.assertNext(statistics -> {
assertNotNull(statistics);
assertNotNull(statistics.getGeoReplication());
assertNotNull(statistics.getGeoReplication().getStatus());
assertNotNull(statistics.getGeoReplication().getLastSyncTime());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
} | class TableServiceAsyncClientTest extends TableServiceClientTestBase {
// Default bound for blocking and StepVerifier waits throughout this class.
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(100);
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
// Flags whether the current run targets a Cosmos table endpoint.
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceAsyncClient serviceClient;
// Decorates an HttpClient so async-only usage can be asserted by the harness.
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertAsync()
.build();
}
@Override
protected void beforeTest() {
// Build a new async service client before each test case runs.
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildAsyncClient();
}
@Test
// Creating a new table through the async service client emits a non-null item.
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
StepVerifier.create(serviceClient.createTable(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
// Restricted to the public Azure cloud on service version 2020-12-06.
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
// A random tenant ID plus wildcard allowed tenants drives the credential
// through the tenant-discovery challenge this test targets.
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceAsyncClient tableServiceAsyncClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
// NOTE(review): unterminated string literal below — the default endpoint URL
// and the rest of this builder call were lost (likely to comment stripping).
// Restore from the original source before compiling.
"https:
StepVerifier.create(tableServiceAsyncClient.createTable(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
// A second create through the same client must also succeed.
tableName = testResourceNamer.randomName("tableName", 20);
StepVerifier.create(tableServiceAsyncClient.createTable(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
StepVerifier.create(serviceClient.createTableWithResponse(tableName))
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.createTable(tableName))
.expectErrorMatches(e -> e instanceof TableServiceException
&& ((TableServiceException) e).getResponse().getStatusCode() == 409)
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
StepVerifier.create(serviceClient.createTableIfNotExists(tableName))
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.createTableIfNotExists(tableName))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
StepVerifier.create(serviceClient.createTableIfNotExistsWithResponse(tableName))
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.createTableIfNotExistsWithResponse(tableName))
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.deleteTable(tableName))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
StepVerifier.create(serviceClient.deleteTable(tableName))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName).block();
StepVerifier.create(serviceClient.deleteTableWithResponse(tableName))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
StepVerifier.create(serviceClient.deleteTableWithResponse(tableName))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceListTables() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName2).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.listTables())
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName2).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.listTables(options))
.assertNext(table -> assertEquals(tableName, table.getName()))
.expectNextCount(0)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName2).block(DEFAULT_TIMEOUT);
serviceClient.createTable(tableName3).block(DEFAULT_TIMEOUT);
StepVerifier.create(serviceClient.listTables(options))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
@Test
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName).block(DEFAULT_TIMEOUT);
TableAsyncClient tableClient = serviceClient.getTableClient(tableName);
TableAsyncClientTest.getEntityWithResponseAsyncImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("r");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
@Test
public void setGetProperties() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and getting properties is not supported on Cosmos endpoints.");
TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
.setDaysToRetain(5)
.setEnabled(true);
TableServiceLogging logging = new TableServiceLogging()
.setReadLogged(true)
.setAnalyticsVersion("1.0")
.setRetentionPolicy(retentionPolicy);
List<TableServiceCorsRule> corsRules = new ArrayList<>();
corsRules.add(new TableServiceCorsRule()
.setAllowedMethods("GET,PUT,HEAD")
.setAllowedOrigins("*")
.setAllowedHeaders("x-ms-version")
.setExposedHeaders("x-ms-client-request-id")
.setMaxAgeInSeconds(10));
TableServiceMetrics hourMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceMetrics minuteMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceProperties sentProperties = new TableServiceProperties()
.setLogging(logging)
.setCorsRules(corsRules)
.setMinuteMetrics(minuteMetrics)
.setHourMetrics(hourMetrics);
StepVerifier.create(serviceClient.setPropertiesWithResponse(sentProperties))
.assertNext(response -> {
assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
assertNotNull(response.getHeaders().getValue("x-ms-version"));
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
sleepIfRunningAgainstService(30000);
StepVerifier.create(serviceClient.getProperties())
.assertNext(retrievedProperties -> assertPropertiesEquals(sentProperties, retrievedProperties))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
}
    @Test
    public void getStatistics() throws URISyntaxException {
        Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
        // Statistics are served from the secondary endpoint: derive it from the primary by
        // appending "-secondary" to the first (account) label of the host name.
        URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
        String[] hostParts = primaryEndpoint.getHost().split("\\.");
        StringJoiner secondaryHostJoiner = new StringJoiner(".");
        secondaryHostJoiner.add(hostParts[0] + "-secondary");
        for (int i = 1; i < hostParts.length; i++) {
            secondaryHostJoiner.add(hostParts[i]);
        }
        // NOTE(review): the next line appears truncated in this copy of the file (cut after ":");
        // presumably it concatenated "//" and secondaryHostJoiner.toString() — confirm against the original.
        String secondaryEndpoint = primaryEndpoint.getScheme() + ":
        // Reuse the primary client's pipeline so auth and recording policies carry over.
        TableServiceAsyncClient secondaryClient = new TableServiceClientBuilder()
            .endpoint(secondaryEndpoint)
            .serviceVersion(serviceClient.getServiceVersion())
            .pipeline(serviceClient.getHttpPipeline())
            .buildAsyncClient();
        StepVerifier.create(secondaryClient.getStatistics())
            .assertNext(statistics -> {
                assertNotNull(statistics);
                assertNotNull(statistics.getGeoReplication());
                assertNotNull(statistics.getGeoReplication().getStatus());
                assertNotNull(statistics.getGeoReplication().getLastSyncTime());
            })
            .expectComplete()
            .verify(DEFAULT_TIMEOUT);
    }
} |
Same comment about using `OffsetDateTime.now()` and adding to it. | public void canUseSasTokenToCreateValidTableClient() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
} | final OffsetDateTime expiryTime = OffsetDateTime.of(2024, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC); | public void canUseSasTokenToCreateValidTableClient() {
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
} | class TableServiceClientTest extends TableServiceClientTestBase {
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceClient serviceClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
@Override
protected void beforeTest() {
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildClient();
}
@Test
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTable(tableName));
}
    /**
     * Tests that tables can be created while the credential carries a different tenant ID than the one that will be
     * provided in the authentication challenge.
     */
    @Test
    public void serviceCreateTableWithMultipleTenants() {
        // Multi-tenant auth is only exercised against real Storage endpoints on service version 2020-12-06.
        Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
            && serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
        String tableName = testResourceNamer.randomName("tableName", 20);
        String secondTableName = testResourceNamer.randomName("secondTableName", 20);
        TokenCredential credential = null;
        if (interceptorManager.isPlaybackMode()) {
            // Recorded runs never reach AAD, so a mock credential suffices.
            credential = new MockTokenCredential();
        } else {
            // Deliberately uses a random tenant ID: the client is expected to switch to the tenant
            // returned in the service's authentication challenge ("*" allows any tenant).
            credential = new ClientSecretCredentialBuilder()
                .clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
                .clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
                .tenantId(testResourceNamer.randomUuid())
                .additionallyAllowedTenants("*")
                .build();
        }
        final TableServiceClient tableServiceClient =
            getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
                // NOTE(review): the next line appears truncated in this copy of the file (string literal cut
                // off after "https:") — restore the full fallback endpoint URL and the closing arguments.
                "https:
        assertNotNull(tableServiceClient.createTable(tableName));
        // The second creation verifies the tenant obtained from the challenge keeps working.
        assertNotNull(tableServiceClient.createTable(secondTableName));
    }
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableWithResponse(tableName, null, null).getValue());
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertThrows(TableServiceException.class, () -> serviceClient.createTable(tableName));
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName);
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertDoesNotThrow(() -> serviceClient.deleteTable(tableName));
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
assertDoesNotThrow(() -> serviceClient.createTable(tableName));
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName);
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceListTables() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
Iterator<PagedResponse<TableItem>> iterator = serviceClient.listTables().iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertTrue(2 <= iterator.next().getValue().size());
}
@Test
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.listTables(options, null, null)
.forEach(tableItem -> assertEquals(tableName, tableItem.getName()));
}
@Test
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.createTable(tableName3);
Iterator<PagedResponse<TableItem>> iterator =
serviceClient.listTables(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertEquals(2, iterator.next().getValue().size());
}
@Test
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
TableClient tableClient = serviceClient.getTableClient(tableName);
TableClientTest.getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("r");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
@Test
public void setGetProperties() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and getting properties is not supported on Cosmos endpoints.");
TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
.setDaysToRetain(5)
.setEnabled(true);
TableServiceLogging logging = new TableServiceLogging()
.setReadLogged(true)
.setAnalyticsVersion("1.0")
.setRetentionPolicy(retentionPolicy);
List<TableServiceCorsRule> corsRules = new ArrayList<>();
corsRules.add(new TableServiceCorsRule()
.setAllowedMethods("GET,PUT,HEAD")
.setAllowedOrigins("*")
.setAllowedHeaders("x-ms-version")
.setExposedHeaders("x-ms-client-request-id")
.setMaxAgeInSeconds(10));
TableServiceMetrics hourMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceMetrics minuteMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceProperties sentProperties = new TableServiceProperties()
.setLogging(logging)
.setCorsRules(corsRules)
.setMinuteMetrics(minuteMetrics)
.setHourMetrics(hourMetrics);
Response<Void> response = serviceClient.setPropertiesWithResponse(sentProperties, null, null);
assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
assertNotNull(response.getHeaders().getValue("x-ms-version"));
sleepIfRunningAgainstService(20000);
TableServiceProperties retrievedProperties = serviceClient.getProperties();
assertPropertiesEquals(sentProperties, retrievedProperties);
}
    @Test
    public void getStatistics() throws URISyntaxException {
        Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
        // Statistics are served from the secondary endpoint: derive it from the primary by
        // appending "-secondary" to the first (account) label of the host name.
        URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
        String[] hostParts = primaryEndpoint.getHost().split("\\.");
        StringJoiner secondaryHostJoiner = new StringJoiner(".");
        secondaryHostJoiner.add(hostParts[0] + "-secondary");
        for (int i = 1; i < hostParts.length; i++) {
            secondaryHostJoiner.add(hostParts[i]);
        }
        // NOTE(review): the next line appears truncated in this copy of the file (cut after ":");
        // presumably it concatenated "//" and secondaryHostJoiner.toString() — confirm against the original.
        String secondaryEndpoint = primaryEndpoint.getScheme() + ":
        // Reuse the primary client's pipeline so auth and recording policies carry over.
        TableServiceClient secondaryClient = new TableServiceClientBuilder()
            .endpoint(secondaryEndpoint)
            .serviceVersion(serviceClient.getServiceVersion())
            .pipeline(serviceClient.getHttpPipeline())
            .buildClient();
        TableServiceStatistics statistics = secondaryClient.getStatistics();
        assertNotNull(statistics);
        assertNotNull(statistics.getGeoReplication());
        assertNotNull(statistics.getGeoReplication().getStatus());
        assertNotNull(statistics.getGeoReplication().getLastSyncTime());
    }
} | class TableServiceClientTest extends TableServiceClientTestBase {
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceClient serviceClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
@Override
protected void beforeTest() {
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildClient();
}
@Test
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTable(tableName));
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
String secondTableName = testResourceNamer.randomName("secondTableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceClient tableServiceClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
assertNotNull(tableServiceClient.createTable(tableName));
assertNotNull(tableServiceClient.createTable(secondTableName));
}
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableWithResponse(tableName, null, null).getValue());
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertThrows(TableServiceException.class, () -> serviceClient.createTable(tableName));
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName);
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertDoesNotThrow(() -> serviceClient.deleteTable(tableName));
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
assertDoesNotThrow(() -> serviceClient.createTable(tableName));
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName);
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceListTables() {
    // After creating two tables, the first page of the listing holds at least those two.
    // Other tests may have created tables too, so only a lower bound is asserted.
    final String firstTable = testResourceNamer.randomName("test", 20);
    final String secondTable = testResourceNamer.randomName("test", 20);
    serviceClient.createTable(firstTable);
    serviceClient.createTable(secondTable);

    Iterator<PagedResponse<TableItem>> pages = serviceClient.listTables().iterableByPage().iterator();

    assertTrue(pages.hasNext());
    assertTrue(pages.next().getValue().size() >= 2);
}
@Test
public void serviceListTablesWithFilter() {
    // An exact-name OData filter must restrict the listing to the single matching table.
    final String targetTable = testResourceNamer.randomName("test", 20);
    final String otherTable = testResourceNamer.randomName("test", 20);
    ListTablesOptions filterOptions = new ListTablesOptions().setFilter("TableName eq '" + targetTable + "'");
    serviceClient.createTable(targetTable);
    serviceClient.createTable(otherTable);

    serviceClient.listTables(filterOptions, null, null)
        .forEach(item -> assertEquals(targetTable, item.getName()));
}
@Test
public void serviceListTablesWithTop() {
    // With top=2 and at least three tables present, the first page contains exactly two entries.
    final String firstTable = testResourceNamer.randomName("test", 20);
    final String secondTable = testResourceNamer.randomName("test", 20);
    final String thirdTable = testResourceNamer.randomName("test", 20);
    ListTablesOptions topOptions = new ListTablesOptions().setTop(2);
    serviceClient.createTable(firstTable);
    serviceClient.createTable(secondTable);
    serviceClient.createTable(thirdTable);

    Iterator<PagedResponse<TableItem>> pages =
        serviceClient.listTables(topOptions, null, null).iterableByPage().iterator();

    assertTrue(pages.hasNext());
    assertEquals(2, pages.next().getValue().size());
}
@Test
public void serviceGetTableClient() {
    // A TableClient obtained from the service client must be usable for entity operations.
    final String tableName = testResourceNamer.randomName("test", 20);
    serviceClient.createTable(tableName);

    TableClient scopedClient = serviceClient.getTableClient(tableName);

    TableClientTest.getEntityWithResponseImpl(scopedClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
    // With only the required values plus protocol and version set, every query parameter
    // before the signature is deterministic and can be asserted as a prefix.
    final OffsetDateTime expiry = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableAccountSasSignatureValues signatureValues = new TableAccountSasSignatureValues(
        expiry,
        TableAccountSasPermission.parse("r"),
        new TableAccountSasService().setTableAccess(true),
        new TableAccountSasResourceType().setObject(true))
        .setProtocol(TableSasProtocol.HTTPS_ONLY)
        .setVersion(TableServiceVersion.V2019_02_02.getVersion());

    final String sas = serviceClient.generateAccountSas(signatureValues);

    assertTrue(sas.startsWith(
        "sv=2019-02-02&ss=t&srt=o&se=2021-12-12T00%3A00%3A00Z&sp=r&spr=https&sig="));
}
@Test
public void generateAccountSasTokenWithAllParameters() {
    // Exercises every optional knob: start time, IP range, mixed protocol, and extra permissions.
    final OffsetDateTime expiry = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
    final OffsetDateTime start = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
    final TableAccountSasSignatureValues signatureValues = new TableAccountSasSignatureValues(
        expiry,
        TableAccountSasPermission.parse("rdau"),
        new TableAccountSasService().setTableAccess(true),
        new TableAccountSasResourceType().setObject(true))
        .setProtocol(TableSasProtocol.HTTPS_HTTP)
        .setVersion(TableServiceVersion.V2019_02_02.getVersion())
        .setStartTime(start)
        .setSasIpRange(TableSasIpRange.parse("a-b"));

    final String sas = serviceClient.generateAccountSas(signatureValues);

    assertTrue(sas.startsWith(
        "sv=2019-02-02&ss=t&srt=o&st=2015-01-01T00%3A00%3A00Z&se=2021-12-12T00%3A00%3A00Z"
            + "&sp=rdau&sip=a-b&spr=https%2Chttp&sig="));
}
@Test
public void setGetProperties() {
    // Round-trips the full analytics configuration (logging, CORS, hour/minute metrics)
    // through set + get and verifies the retrieved properties match what was sent.
    // BUG FIX: this method carried two @Test annotations; @Test is not repeatable, so the
    // duplicate was a compile error. A single annotation is kept.
    Assumptions.assumeFalse(IS_COSMOS_TEST,
        "Setting and getting properties is not supported on Cosmos endpoints.");

    TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
        .setDaysToRetain(5)
        .setEnabled(true);
    TableServiceLogging logging = new TableServiceLogging()
        .setReadLogged(true)
        .setAnalyticsVersion("1.0")
        .setRetentionPolicy(retentionPolicy);
    List<TableServiceCorsRule> corsRules = new ArrayList<>();
    corsRules.add(new TableServiceCorsRule()
        .setAllowedMethods("GET,PUT,HEAD")
        .setAllowedOrigins("*")
        .setAllowedHeaders("x-ms-version")
        .setExposedHeaders("x-ms-client-request-id")
        .setMaxAgeInSeconds(10));
    TableServiceMetrics hourMetrics = new TableServiceMetrics()
        .setEnabled(true)
        .setVersion("1.0")
        .setRetentionPolicy(retentionPolicy)
        .setIncludeApis(true);
    TableServiceMetrics minuteMetrics = new TableServiceMetrics()
        .setEnabled(true)
        .setVersion("1.0")
        .setRetentionPolicy(retentionPolicy)
        .setIncludeApis(true);
    TableServiceProperties sentProperties = new TableServiceProperties()
        .setLogging(logging)
        .setCorsRules(corsRules)
        .setMinuteMetrics(minuteMetrics)
        .setHourMetrics(hourMetrics);

    Response<Void> response = serviceClient.setPropertiesWithResponse(sentProperties, null, null);

    assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
    assertNotNull(response.getHeaders().getValue("x-ms-version"));

    // Service-side property changes propagate asynchronously; wait before reading back.
    sleepIfRunningAgainstService(20000);

    TableServiceProperties retrievedProperties = serviceClient.getProperties();

    assertPropertiesEquals(sentProperties, retrievedProperties);
}
@Test
public void getStatistics() throws URISyntaxException {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
String[] hostParts = primaryEndpoint.getHost().split("\\.");
StringJoiner secondaryHostJoiner = new StringJoiner(".");
secondaryHostJoiner.add(hostParts[0] + "-secondary");
for (int i = 1; i < hostParts.length; i++) {
secondaryHostJoiner.add(hostParts[i]);
}
String secondaryEndpoint = primaryEndpoint.getScheme() + ":
TableServiceClient secondaryClient = new TableServiceClientBuilder()
.endpoint(secondaryEndpoint)
.serviceVersion(serviceClient.getServiceVersion())
.pipeline(serviceClient.getHttpPipeline())
.buildClient();
TableServiceStatistics statistics = secondaryClient.getStatistics();
assertNotNull(statistics);
assertNotNull(statistics.getGeoReplication());
assertNotNull(statistics.getGeoReplication().getStatus());
assertNotNull(statistics.getGeoReplication().getLastSyncTime());
}
} |
Let's punt this feedback to another PR as there are some other things going on here that already existed and not part of this PR | public void canUseSasTokenToCreateValidTableClient() {
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
} | final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); | public void canUseSasTokenToCreateValidTableClient() {
final OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("a");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(serviceClient.getServiceEndpoint())
.sasToken(sas)
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
} | class TableServiceClientTest extends TableServiceClientTestBase {
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceClient serviceClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
@Override
protected void beforeTest() {
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildClient();
}
@Test
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTable(tableName));
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
String secondTableName = testResourceNamer.randomName("secondTableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceClient tableServiceClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
assertNotNull(tableServiceClient.createTable(tableName));
assertNotNull(tableServiceClient.createTable(secondTableName));
}
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableWithResponse(tableName, null, null).getValue());
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertThrows(TableServiceException.class, () -> serviceClient.createTable(tableName));
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName);
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertDoesNotThrow(() -> serviceClient.deleteTable(tableName));
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
assertDoesNotThrow(() -> serviceClient.createTable(tableName));
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName);
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceListTables() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
Iterator<PagedResponse<TableItem>> iterator = serviceClient.listTables().iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertTrue(2 <= iterator.next().getValue().size());
}
@Test
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.listTables(options, null, null)
.forEach(tableItem -> assertEquals(tableName, tableItem.getName()));
}
@Test
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.createTable(tableName3);
Iterator<PagedResponse<TableItem>> iterator =
serviceClient.listTables(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertEquals(2, iterator.next().getValue().size());
}
@Test
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
TableClient tableClient = serviceClient.getTableClient(tableName);
TableClientTest.getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("r");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
@Test
public void setGetProperties() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and getting properties is not supported on Cosmos endpoints.");
TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
.setDaysToRetain(5)
.setEnabled(true);
TableServiceLogging logging = new TableServiceLogging()
.setReadLogged(true)
.setAnalyticsVersion("1.0")
.setRetentionPolicy(retentionPolicy);
List<TableServiceCorsRule> corsRules = new ArrayList<>();
corsRules.add(new TableServiceCorsRule()
.setAllowedMethods("GET,PUT,HEAD")
.setAllowedOrigins("*")
.setAllowedHeaders("x-ms-version")
.setExposedHeaders("x-ms-client-request-id")
.setMaxAgeInSeconds(10));
TableServiceMetrics hourMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceMetrics minuteMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceProperties sentProperties = new TableServiceProperties()
.setLogging(logging)
.setCorsRules(corsRules)
.setMinuteMetrics(minuteMetrics)
.setHourMetrics(hourMetrics);
Response<Void> response = serviceClient.setPropertiesWithResponse(sentProperties, null, null);
assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
assertNotNull(response.getHeaders().getValue("x-ms-version"));
sleepIfRunningAgainstService(20000);
TableServiceProperties retrievedProperties = serviceClient.getProperties();
assertPropertiesEquals(sentProperties, retrievedProperties);
}
@Test
public void getStatistics() throws URISyntaxException {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
String[] hostParts = primaryEndpoint.getHost().split("\\.");
StringJoiner secondaryHostJoiner = new StringJoiner(".");
secondaryHostJoiner.add(hostParts[0] + "-secondary");
for (int i = 1; i < hostParts.length; i++) {
secondaryHostJoiner.add(hostParts[i]);
}
String secondaryEndpoint = primaryEndpoint.getScheme() + ":
TableServiceClient secondaryClient = new TableServiceClientBuilder()
.endpoint(secondaryEndpoint)
.serviceVersion(serviceClient.getServiceVersion())
.pipeline(serviceClient.getHttpPipeline())
.buildClient();
TableServiceStatistics statistics = secondaryClient.getStatistics();
assertNotNull(statistics);
assertNotNull(statistics.getGeoReplication());
assertNotNull(statistics.getGeoReplication().getStatus());
assertNotNull(statistics.getGeoReplication().getLastSyncTime());
}
} | class TableServiceClientTest extends TableServiceClientTestBase {
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = TestUtils.isCosmosTest();
private TableServiceClient serviceClient;
protected HttpClient buildAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.skipRequest((ignored1, ignored2) -> false)
.assertSync()
.build();
}
@Override
protected void beforeTest() {
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
serviceClient = getClientBuilder(connectionString).buildClient();
}
@Test
public void serviceCreateTable() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTable(tableName));
}
/**
* Tests that a table and entity can be created while having a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@Test
public void serviceCreateTableWithMultipleTenants() {
Assumptions.assumeTrue(serviceClient.getServiceEndpoint().contains("core.windows.net")
&& serviceClient.getServiceVersion() == TableServiceVersion.V2020_12_06);
String tableName = testResourceNamer.randomName("tableName", 20);
String secondTableName = testResourceNamer.randomName("secondTableName", 20);
TokenCredential credential = null;
if (interceptorManager.isPlaybackMode()) {
credential = new MockTokenCredential();
} else {
credential = new ClientSecretCredentialBuilder()
.clientId(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_ID", "clientId"))
.clientSecret(Configuration.getGlobalConfiguration().get("TABLES_CLIENT_SECRET", "clientSecret"))
.tenantId(testResourceNamer.randomUuid())
.additionallyAllowedTenants("*")
.build();
}
final TableServiceClient tableServiceClient =
getClientBuilder(Configuration.getGlobalConfiguration().get("TABLES_ENDPOINT",
"https:
assertNotNull(tableServiceClient.createTable(tableName));
assertNotNull(tableServiceClient.createTable(secondTableName));
}
@Test
public void serviceCreateTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableWithResponse(tableName, null, null).getValue());
}
@Test
public void serviceCreateTableFailsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertThrows(TableServiceException.class, () -> serviceClient.createTable(tableName));
}
@Test
public void serviceCreateTableIfNotExists() {
String tableName = testResourceNamer.randomName("test", 20);
assertNotNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertNull(serviceClient.createTableIfNotExists(tableName));
}
@Test
public void serviceCreateTableIfNotExistsWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(response.getValue());
}
@Test
public void serviceCreateTableIfNotExistsWithResponseSucceedsIfExists() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 409;
serviceClient.createTable(tableName);
final Response<TableClient> response = serviceClient.createTableIfNotExistsWithResponse(tableName, null, null);
assertEquals(expectedStatusCode, response.getStatusCode());
assertNull(response.getValue());
}
@Test
public void serviceDeleteTable() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
assertDoesNotThrow(() -> serviceClient.deleteTable(tableName));
}
@Test
public void serviceDeleteNonExistingTable() {
final String tableName = testResourceNamer.randomName("test", 20);
assertDoesNotThrow(() -> serviceClient.createTable(tableName));
}
@Test
public void serviceDeleteTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 204;
serviceClient.createTable(tableName);
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceDeleteNonExistingTableWithResponse() {
String tableName = testResourceNamer.randomName("test", 20);
int expectedStatusCode = 404;
assertEquals(expectedStatusCode, serviceClient.deleteTableWithResponse(tableName, null, null).getStatusCode());
}
@Test
public void serviceListTables() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
Iterator<PagedResponse<TableItem>> iterator = serviceClient.listTables().iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertTrue(2 <= iterator.next().getValue().size());
}
@Test
public void serviceListTablesWithFilter() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setFilter("TableName eq '" + tableName + "'");
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.listTables(options, null, null)
.forEach(tableItem -> assertEquals(tableName, tableItem.getName()));
}
@Test
public void serviceListTablesWithTop() {
final String tableName = testResourceNamer.randomName("test", 20);
final String tableName2 = testResourceNamer.randomName("test", 20);
final String tableName3 = testResourceNamer.randomName("test", 20);
ListTablesOptions options = new ListTablesOptions().setTop(2);
serviceClient.createTable(tableName);
serviceClient.createTable(tableName2);
serviceClient.createTable(tableName3);
Iterator<PagedResponse<TableItem>> iterator =
serviceClient.listTables(options, null, null).iterableByPage().iterator();
assertTrue(iterator.hasNext());
assertEquals(2, iterator.next().getValue().size());
}
@Test
public void serviceGetTableClient() {
final String tableName = testResourceNamer.randomName("test", 20);
serviceClient.createTable(tableName);
TableClient tableClient = serviceClient.getTableClient(tableName);
TableClientTest.getEntityWithResponseImpl(tableClient, testResourceNamer, "partitionKey", "rowKey");
}
@Test
public void generateAccountSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("r");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateAccountSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableAccountSasPermission permissions = TableAccountSasPermission.parse("rdau");
final TableAccountSasService services = new TableAccountSasService().setTableAccess(true);
final TableAccountSasResourceType resourceTypes = new TableAccountSasResourceType().setObject(true);
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final TableAccountSasSignatureValues sasSignatureValues =
new TableAccountSasSignatureValues(expiryTime, permissions, services, resourceTypes)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange);
final String sas = serviceClient.generateAccountSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&ss=t"
+ "&srt=o"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&sp=rdau"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
@Test
public void setGetProperties() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and getting properties is not supported on Cosmos endpoints.");
TableServiceRetentionPolicy retentionPolicy = new TableServiceRetentionPolicy()
.setDaysToRetain(5)
.setEnabled(true);
TableServiceLogging logging = new TableServiceLogging()
.setReadLogged(true)
.setAnalyticsVersion("1.0")
.setRetentionPolicy(retentionPolicy);
List<TableServiceCorsRule> corsRules = new ArrayList<>();
corsRules.add(new TableServiceCorsRule()
.setAllowedMethods("GET,PUT,HEAD")
.setAllowedOrigins("*")
.setAllowedHeaders("x-ms-version")
.setExposedHeaders("x-ms-client-request-id")
.setMaxAgeInSeconds(10));
TableServiceMetrics hourMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceMetrics minuteMetrics = new TableServiceMetrics()
.setEnabled(true)
.setVersion("1.0")
.setRetentionPolicy(retentionPolicy)
.setIncludeApis(true);
TableServiceProperties sentProperties = new TableServiceProperties()
.setLogging(logging)
.setCorsRules(corsRules)
.setMinuteMetrics(minuteMetrics)
.setHourMetrics(hourMetrics);
Response<Void> response = serviceClient.setPropertiesWithResponse(sentProperties, null, null);
assertNotNull(response.getHeaders().getValue("x-ms-request-id"));
assertNotNull(response.getHeaders().getValue("x-ms-version"));
sleepIfRunningAgainstService(20000);
TableServiceProperties retrievedProperties = serviceClient.getProperties();
assertPropertiesEquals(sentProperties, retrievedProperties);
}
@Test
public void getStatistics() throws URISyntaxException {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Getting statistics is not supported on Cosmos endpoints.");
URI primaryEndpoint = new URI(serviceClient.getServiceEndpoint());
String[] hostParts = primaryEndpoint.getHost().split("\\.");
StringJoiner secondaryHostJoiner = new StringJoiner(".");
secondaryHostJoiner.add(hostParts[0] + "-secondary");
for (int i = 1; i < hostParts.length; i++) {
secondaryHostJoiner.add(hostParts[i]);
}
String secondaryEndpoint = primaryEndpoint.getScheme() + ":
TableServiceClient secondaryClient = new TableServiceClientBuilder()
.endpoint(secondaryEndpoint)
.serviceVersion(serviceClient.getServiceVersion())
.pipeline(serviceClient.getHttpPipeline())
.buildClient();
TableServiceStatistics statistics = secondaryClient.getStatistics();
assertNotNull(statistics);
assertNotNull(statistics.getGeoReplication());
assertNotNull(statistics.getGeoReplication().getStatus());
assertNotNull(statistics.getGeoReplication().getLastSyncTime());
}
} |
Let's make this a follow-up, but I don't think this design was right: the timeout is applied to the callable, which covers only the creation of the `PagedIterable`; it is not applied to the listing operations themselves, where I believe it should have been applied. | public PagedIterable<TableEntity> listEntities(ListEntitiesOptions options, Duration timeout, Context context) {
Supplier<PagedIterable<TableEntity>> callable = () -> new PagedIterable<>(
() -> listEntitiesFirstPage(context, options, TableEntity.class),
token -> listEntitiesNextPage(token, context, options, TableEntity.class));
return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
} | return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); | public PagedIterable<TableEntity> listEntities(ListEntitiesOptions options, Duration timeout, Context context) {
Supplier<PagedIterable<TableEntity>> callable = () -> new PagedIterable<>(
() -> listEntitiesFirstPage(context, options, TableEntity.class),
token -> listEntitiesNextPage(token, context, options, TableEntity.class));
return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
} | class TableClient {
// Shared executor used to enforce optional client-side timeouts on synchronous service calls.
private static final ExecutorService THREAD_POOL = TableUtils.getThreadPoolWithShutdownHook();
private final ClientLogger logger = new ClientLogger(TableClient.class);
// Name of the table this client targets.
private final String tableName;
// Generated service layer used for the regular (non-batch) REST operations.
private final AzureTableImpl tablesImplementation;
// Generated service layer for transactional batch submissions; null on batch sub-clients.
private final TransactionalBatchImpl transactionalBatchImplementation;
// Account name parsed from the first host label of the service endpoint.
private final String accountName;
// Full endpoint of this table: service endpoint resolved against "/" + tableName.
private final String tableEndpoint;
private final HttpPipeline pipeline;
// Pre-built client used when this table participates in a transactional batch; null on batch sub-clients.
private final TableClient transactionalBatchClient;
// Primary constructor: validates the table name, derives the account name and table endpoint
// from the service URL, then builds both the regular and the transactional-batch service layers.
TableClient(String tableName, HttpPipeline pipeline, String serviceUrl, TableServiceVersion serviceVersion,
SerializerAdapter tablesSerializer, SerializerAdapter transactionalBatchSerializer) {
try {
if (tableName == null) {
throw new NullPointerException(("'tableName' must not be null to create TableClient."));
}
if (tableName.isEmpty()) {
throw new IllegalArgumentException("'tableName' must not be empty to create a TableClient.");
}
final URI uri = URI.create(serviceUrl);
// Account name is the first dot-separated host label of the endpoint.
this.accountName = uri.getHost().split("\\.", 2)[0];
this.tableEndpoint = uri.resolve("/" + tableName).toString();
logger.verbose("Table Service URI: {}", uri);
} catch (NullPointerException | IllegalArgumentException ex) {
// Route validation failures through the logger so they are recorded before propagating.
throw logger.logExceptionAsError(ex);
}
this.tablesImplementation = new AzureTableImplBuilder()
.url(serviceUrl)
.serializerAdapter(tablesSerializer)
.pipeline(pipeline)
.version(serviceVersion.getVersion())
.buildClient();
this.transactionalBatchImplementation =
new TransactionalBatchImpl(tablesImplementation, transactionalBatchSerializer);
this.tableName = tableName;
this.pipeline = tablesImplementation.getHttpPipeline();
// Pre-build the sub-client used when this table participates in transactional batches.
this.transactionalBatchClient = new TableClient(this, serviceVersion, tablesSerializer);
}
// Copy constructor creating the transactional-batch variant of an existing client: it reuses
// the source client's identity (account, endpoint, table name) but swaps in the pipeline built
// by BuilderHelper.buildNullClientPipeline().
TableClient(TableClient client, ServiceVersion serviceVersion, SerializerAdapter tablesSerializer) {
this.accountName = client.getAccountName();
this.tableEndpoint = client.getTableEndpoint();
this.pipeline = BuilderHelper.buildNullClientPipeline();
this.tablesImplementation = new AzureTableImplBuilder()
.url(client.getTablesImplementation().getUrl())
.serializerAdapter(tablesSerializer)
.pipeline(this.pipeline)
.version(serviceVersion.getVersion())
.buildClient();
this.tableName = client.getTableName();
// Batch sub-clients never submit batches themselves, so these stay unset.
this.transactionalBatchImplementation = null;
this.transactionalBatchClient = null;
}
/**
* Gets the name of the table.
*
* @return The name of the table.
*/
public String getTableName() {
    // Immutable; assigned once in the constructor.
    return this.tableName;
}
/**
* Gets the name of the account containing the table.
*
* @return The name of the account containing the table.
*/
public String getAccountName() {
    // Parsed from the endpoint host at construction time.
    return this.accountName;
}
/**
* Gets the endpoint for this table.
*
* @return The endpoint for this table.
*/
public String getTableEndpoint() {
    // Service endpoint resolved against the table name at construction time.
    return this.tableEndpoint;
}
// Package-private accessor for the pipeline backing this client.
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
* Gets the {@link AzureTableImpl} powering this client.
*
* @return This client's {@link AzureTableImpl}.
*/
AzureTableImpl getTablesImplementation() {
    // Generated service layer shared with sibling clients.
    return this.tablesImplementation;
}
/**
* Gets the REST API version used by this client.
*
* @return The REST API version used by this client.
*/
public TableServiceVersion getServiceVersion() {
    // Derive the enum value from the version string carried by the implementation layer.
    String version = tablesImplementation.getVersion();
    return TableServiceVersion.fromString(version);
}
/**
* Generates a service SAS for the table using the specified {@link TableSasSignatureValues}.
*
* <p><strong>Note:</strong> The client must be authenticated via {@link AzureNamedKeyCredential}.</p>
* <p>See {@link TableSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* @param tableSasSignatureValues {@link TableSasSignatureValues}.
*
* @return A {@code String} representing the SAS query parameters.
*
* @throws IllegalStateException If this {@link TableClient} is not authenticated with an
* {@link AzureNamedKeyCredential}.
*/
public String generateSas(TableSasSignatureValues tableSasSignatureValues) {
    // A SAS can only be signed with the account key, so a named-key credential must be present
    // somewhere in this client's pipeline.
    final AzureNamedKeyCredential credential = TableSasUtils.extractNamedKeyCredential(getHttpPipeline());
    if (credential == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Cannot generate a SAS token with a client that"
            + " is not authenticated with an AzureNamedKeyCredential."));
    }
    TableSasGenerator generator = new TableSasGenerator(tableSasSignatureValues, getTableName(), credential);
    return generator.getSas();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTable -->
* <pre>
* TableItem tableItem = tableClient.createTable&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTable -->
*
* @return A {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableItem createTable() {
    // Delegate to the response variant with no timeout and no caller context.
    Response<TableItem> response = createTableWithResponse(null, null);
    return response.getValue();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the {@link Response HTTP response} and the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTableWithResponse
* <pre>
* Response<TableItem> response = tableClient.createTableWithResponse&
* new Context&
*
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing a {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableItem> createTableWithResponse(Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
final TableProperties properties = new TableProperties().setTableName(tableName);
// RETURN_NO_CONTENT: the service replies without a body, so the TableItem is synthesized locally
// from the table name rather than deserialized from the response.
Supplier<Response<TableItem>> callable = () ->
new SimpleResponse<>(tablesImplementation.getTables().createWithResponse(properties, null,
ResponseFormat.RETURN_NO_CONTENT, null, contextValue),
TableItemAccessHelper.createItem(new TableResponseProperties().setTableName(tableName)));
// Runs the call directly, or on the shared pool when a client-side timeout was requested.
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTable -->
* <pre>
* tableClient.deleteTable&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTable -->
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteTable() {
// Delegates to the response variant with no timeout and no caller context (a 404 is swallowed there).
deleteTableWithResponse(null, null);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table. Prints out the details of the {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTableWithResponse
* <pre>
* Response<Void> response = tableClient.deleteTableWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteTableWithResponse(Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
Supplier<Response<Void>> callable = () -> new SimpleResponse<>(tablesImplementation.getTables()
.deleteWithResponse(tableName, null, contextValue), null);
try {
// Run directly, or on the shared pool when a client-side timeout was requested.
return hasTimeout(timeout)
? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
} catch (InterruptedException | ExecutionException | TimeoutException ex) {
throw logger.logExceptionAsError(new RuntimeException(ex));
} catch (RuntimeException ex) {
Throwable except = mapThrowableToTableServiceException(ex);
// Deleting a table that does not exist (404) is treated as success; other errors rethrow.
return swallow404Exception(except);
}
}
// Converts a 404 TableServiceException into a successful empty response (delete operations are
// treated as idempotent); any other throwable is mapped, logged, and rethrown.
private Response<Void> swallow404Exception(Throwable ex) {
    if (ex instanceof TableServiceException
        && ((TableServiceException) ex).getResponse().getStatusCode() == 404) {
        TableServiceException serviceException = (TableServiceException) ex;
        return new SimpleResponse<>(
            serviceException.getResponse().getRequest(),
            serviceException.getResponse().getStatusCode(),
            serviceException.getResponse().getHeaders(),
            null);
    }
    throw logger.logExceptionAsError((RuntimeException) (TableUtils.mapThrowableToTableServiceException(ex)));
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the created
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.createEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntity
*
* @param entity The {@link TableEntity entity} to insert.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void createEntity(TableEntity entity) {
// Delegates to the response variant with no timeout and no caller context.
createEntityWithResponse(entity, null, null);
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the
* {@link Response HTTP response} and the created {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntityWithResponse
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.createEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was created.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntityWithResponse
*
* @param entity The {@link TableEntity entity} to insert.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> createEntityWithResponse(TableEntity entity, Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
if (entity == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
}
// Copy values exposed via getters into the entity's property map before serialization.
EntityHelper.setPropertiesFromGetters(entity, logger);
Supplier<Response<Void>> callable = () -> {
Response<Map<String, Object>> response = tablesImplementation.getTables().insertEntityWithResponse(
tableName, null, null, ResponseFormat.RETURN_NO_CONTENT,
entity.getProperties(), null, contextValue);
// Strip the body: callers of this overload only care about status and headers.
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null);
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or merges the
* {@link TableEntity entity} with the existing {@link TableEntity entity} otherwise.
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table. Prints out the details of the upserted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.upsertEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntity
*
* @param entity The {@link TableEntity entity} to upsert.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void upsertEntity(TableEntity entity) {
// Delegates with a null update mode, which falls through to the merge path in the response variant.
upsertEntityWithResponse(entity, null, null, null);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or updates the existing
* {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode} otherwise. The default
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity
* entity}.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table with the specified
* {@link TableEntityUpdateMode update mode} if said {@link TableEntity entity} already exists. Prints out the
* details of the {@link Response HTTP response} and the upserted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntityWithResponse
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.upsertEntityWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntityWithResponse
*
* @param entity The {@link TableEntity entity} to upsert.
* @param updateMode The type of update to perform if the {@link TableEntity entity} already exits.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> upsertEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
if (entity == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
}
// Keys are escaped because they are embedded in the request URL.
String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
EntityHelper.setPropertiesFromGetters(entity, logger);
Supplier<Response<Void>> callable = () -> {
if (updateMode == TableEntityUpdateMode.REPLACE) {
// REPLACE: the stored entity is overwritten wholesale.
return tablesImplementation.getTables().updateEntityWithResponse(
tableName, partitionKey, rowKey, null, null, null,
entity.getProperties(), null, contextValue);
} else {
// Any other mode (including null) merges properties into the stored entity.
return tablesImplementation.getTables().mergeEntityWithResponse(
tableName, partitionKey, rowKey, null, null, null,
entity.getProperties(), null, contextValue);
}
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Updates an existing {@link TableEntity entity} by merging the provided {@link TableEntity entity} with the
* existing {@link TableEntity entity}.
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table. Prints out the details of the updated
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void updateEntity(TableEntity entity) {
// Delegates with a null update mode (merge semantics in the response variant).
updateEntity(entity, null);
}
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified
* {@link TableEntityUpdateMode update mode}. Prints out the details of the updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void updateEntity(TableEntity entity, TableEntityUpdateMode updateMode) {
// ifUnchanged=false: the update is unconditional (wildcard ETag in the response variant).
updateEntityWithResponse(entity, updateMode, false, null, null);
}
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified {@link TableEntityUpdateMode update
* mode}
* if the {@code ETags} on both {@link TableEntity entities} match. Prints out the details of the
* {@link Response HTTP response} updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.updateEntityWithResponse&
* true, Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntityWithResponse
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table, or if {@code ifUnchanged} is {@code true} and the existing {@link TableEntity entity}'s ETag
* does not match that of the provided {@link TableEntity entity}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> updateEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
boolean ifUnchanged, Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
if (entity == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
}
// Keys are escaped because they are embedded in the request URL.
String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
// "*" makes the update unconditional; otherwise the entity's own ETag must match service-side.
String eTag = ifUnchanged ? entity.getETag() : "*";
EntityHelper.setPropertiesFromGetters(entity, logger);
Supplier<Response<Void>> callable = () -> {
if (updateMode == TableEntityUpdateMode.REPLACE) {
return tablesImplementation.getTables()
.updateEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
entity.getProperties(), null, contextValue);
} else {
// Any other mode (including null) merges properties into the stored entity.
return tablesImplementation.getTables()
.mergeEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
entity.getProperties(), null, contextValue);
}
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes an {@link TableEntity entity} on the table. Prints out the entity's {@code partitionKey} and
* {@code rowKey}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If the request is rejected by the service.
* @throws IllegalArgumentException If 'partitionKey' or 'rowKey' is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteEntity(String partitionKey, String rowKey) {
// Unconditional delete: no ETag, ifUnchanged=false, no timeout, no caller context.
deleteEntityWithResponse(partitionKey, rowKey, null, false, null, null);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the deleted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param entity The {@link TableEntity entity} to delete.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteEntity(TableEntity entity) {
// Unconditional delete (ifUnchanged=false) with no timeout and no caller context.
deleteEntityWithResponse(entity, false, null, null);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the
* {@link Response HTTP response} and the deleted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.deleteEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was deleted.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntityWithResponse
*
* @param entity The table {@link TableEntity entity} to delete.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
* @throws IllegalArgumentException If the entity has null 'partitionKey' or 'rowKey'.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteEntityWithResponse(TableEntity entity, boolean ifUnchanged, Duration timeout,
    Context context) {
    // Unpack the entity's addressing fields and delegate to the key-based overload.
    final String partitionKey = entity.getPartitionKey();
    final String rowKey = entity.getRowKey();
    final String eTag = entity.getETag();
    return deleteEntityWithResponse(partitionKey, rowKey, eTag, ifUnchanged, timeout, context);
}
/**
 * Deletes a single entity addressed by partition key and row key.
 *
 * <p>Improvement: argument validation now happens first, before any context/ETag work, so
 * invalid calls fail fast; behavior for valid input is unchanged. A 404 from the service is
 * swallowed (delete is treated as idempotent); other failures are mapped and rethrown.</p>
 */
private Response<Void> deleteEntityWithResponse(String partitionKey, String rowKey, String eTag, boolean ifUnchanged,
Duration timeout, Context context) {
// Fail fast: both key components are required to address an entity.
if (partitionKey == null || rowKey == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null"));
}
Context contextValue = TableUtils.setContext(context, true);
// "*" makes the delete unconditional; otherwise the supplied ETag must match service-side.
String finalETag = ifUnchanged ? eTag : "*";
Supplier<Response<Void>> callable = () -> tablesImplementation.getTables().deleteEntityWithResponse(
tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), finalETag,
null, null, null, contextValue);
try {
// Run directly, or on the shared pool when a client-side timeout was requested.
return hasTimeout(timeout)
? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
} catch (InterruptedException | ExecutionException | TimeoutException ex) {
throw logger.logExceptionAsError(new RuntimeException(ex));
} catch (RuntimeException ex) {
// A 404 means the entity is already gone; treat the delete as successful.
return swallow404Exception(mapThrowableToTableServiceException(ex));
}
}
/**
* Lists all {@link TableEntity entities} within the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities -->
* <pre>
* PagedIterable<TableEntity> tableEntities = tableClient.listEntities&
*
* tableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities -->
*
* @return A {@link PagedIterable} containing all {@link TableEntity entities} within the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<TableEntity> listEntities() {
// Delegates to the options/timeout/context overload with default options and no timeout.
return listEntities(new ListEntitiesOptions(), null, null);
}
/**
* Lists {@link TableEntity entities} using the parameters in the provided options.
*
* <p>If the {@code filter} parameter in the options is set, only {@link TableEntity entities} matching the filter
* will be returned. If the {@code select} parameter is set, only the properties included in the select parameter
* will be returned for each {@link TableEntity entity}. If the {@code top} parameter is set, the maximum number of
* returned {@link TableEntity entities} per page will be limited to that value.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* {@link Response HTTP response} and all the retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* ListEntitiesOptions listEntitiesOptions = new ListEntitiesOptions&
* .setTop&
* .setFilter&
* .setSelect&
*
* PagedIterable<TableEntity> myTableEntities = tableClient.listEntities&
* Duration.ofSeconds&
*
* myTableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
*
* tableEntity.getProperties&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities
*
* @param options The {@code filter}, {@code select}, and {@code top} OData query options to apply to this
* operation.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return A {@link PagedIterable} containing matching {@link TableEntity entities} within the table.
*
* @throws IllegalArgumentException If one or more of the OData query options in {@code options} is malformed.
* @throws TableServiceException If the request is rejected by the service.
*/
/**
 * Lists {@link TableEntity entities} using the parameters in the provided options; see the
 * preceding Javadoc for the full contract.
 *
 * <p>Fix: this public overload was missing from the class even though {@code listEntities()}
 * calls it and the Javadoc/{@code @ServiceMethod} annotation above described it (the annotation
 * was left dangling on the private {@code listEntitiesFirstPage} helper). Restored here.</p>
 *
 * <p>NOTE(review): the timeout bounds only the construction of the {@link PagedIterable} (the
 * {@code Supplier}), not the per-page service calls issued while the iterable is consumed —
 * flagged as a follow-up.</p>
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<TableEntity> listEntities(ListEntitiesOptions options, Duration timeout, Context context) {
    Supplier<PagedIterable<TableEntity>> callable = () -> new PagedIterable<>(
        () -> listEntitiesFirstPage(context, options, TableEntity.class),
        token -> listEntitiesNextPage(token, context, options, TableEntity.class));
    return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}

// Fetches the first page of a listing operation (no continuation token yet).
private <T extends TableEntity> PagedResponse<T> listEntitiesFirstPage(Context context,
    ListEntitiesOptions options,
    Class<T> resultType) {
    return listEntities(null, null, context, options, resultType);
}
/**
 * Retrieves a subsequent page of entities using the continuation token produced by the previous
 * page. Returns {@code null} when the token is {@code null}, i.e. there are no further pages.
 */
private <T extends TableEntity> PagedResponse<T> listEntitiesNextPage(String token, Context context,
                                                                      ListEntitiesOptions options,
                                                                      Class<T> resultType) {
    if (token == null) {
        return null;
    }

    try {
        // The token encodes both continuation keys; split it back into its two components.
        String[] continuationKeys = TableUtils.getKeysFromToken(token);
        String nextPartitionKey = continuationKeys[0];
        String nextRowKey = continuationKeys[1];

        return listEntities(nextPartitionKey, nextRowKey, context, options, resultType);
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError(ex);
    }
}
/**
 * Core page-retrieval routine shared by the first-page and next-page helpers. Issues a single
 * entity query to the service and converts the raw response into a {@link PagedResponse}.
 * Returns {@code null} when the service response carries no entity payload.
 */
private <T extends TableEntity> PagedResponse<T> listEntities(String nextPartitionKey, String nextRowKey,
                                                              Context context, ListEntitiesOptions options,
                                                              Class<T> resultType) {
    Context finalContext = TableUtils.setContext(context, true);

    // The service expects the selected property names as one comma-delimited string.
    String select = options.getSelect() == null ? null : String.join(",", options.getSelect());

    QueryOptions queryOptions = new QueryOptions()
        .setFilter(options.getFilter())
        .setTop(options.getTop())
        .setSelect(select)
        .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);

    ResponseBase<TablesQueryEntitiesHeaders, TableEntityQueryResponse> response =
        tablesImplementation.getTables().queryEntitiesWithResponse(tableName, null, null,
            nextPartitionKey, nextRowKey, queryOptions, finalContext);

    TableEntityQueryResponse queryResponse = response.getValue();

    if (queryResponse == null || queryResponse.getValue() == null) {
        return null;
    }

    // Deserialize each raw property map into an entity, then narrow to the requested subtype.
    List<T> entities = queryResponse.getValue().stream()
        .map(TableEntityAccessHelper::createEntity)
        .map(entity -> EntityHelper.convertToSubclass(entity, resultType, logger))
        .collect(Collectors.toList());

    return new EntityPaged<>(response, entities,
        response.getDeserializedHeaders().getXMsContinuationNextPartitionKey(),
        response.getDeserializedHeaders().getXMsContinuationNextRowKey());
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the retrieved
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntity
* <pre>
* TableEntity tableEntity = tableClient.getEntity&
*
* System.out.printf&
* tableEntity.getRowKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
*
* @return The {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableEntity getEntity(String partitionKey, String rowKey) {
    // Delegates to the *WithResponse variant with no property selection, no timeout and no context.
    return getEntityWithResponse(partitionKey, rowKey, null, null, null).getValue();
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the {@link Response HTTP response}
* retrieved {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntityWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableEntity> response = tableClient.getEntityWithResponse&
* Duration.ofSeconds&
*
* TableEntity myTableEntity = response.getValue&
*
* System.out.printf&
* + " '%s' and properties:", response.getStatusCode&
* myTableEntity.getRowKey&
*
* myTableEntity.getProperties&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntityWithResponse
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
* @param select A list of properties to select on the {@link TableEntity entity}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null}
* or if the {@code select} OData query option is malformed.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableEntity> getEntityWithResponse(String partitionKey, String rowKey, List<String> select,
                                                   Duration timeout, Context context) {
    // Fail fast on invalid arguments before doing any other work.
    if (partitionKey == null || rowKey == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null."));
    }

    Context contextValue = TableUtils.setContext(context, true);

    QueryOptions queryOptions = new QueryOptions()
        .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);

    if (select != null) {
        // The service expects the selected property names as one comma-delimited string.
        queryOptions.setSelect(String.join(",", select));
    }

    Supplier<Response<TableEntity>> callable = () -> {
        // Keys are escaped because single quotes are significant in the OData URL syntax.
        ResponseBase<TablesQueryEntityWithPartitionAndRowKeyHeaders, Map<String, Object>> response =
            tablesImplementation.getTables().queryEntityWithPartitionAndRowKeyWithResponse(
                tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), null,
                null, queryOptions, contextValue);
        Map<String, Object> matchingEntity = response.getValue();

        if (matchingEntity == null || matchingEntity.isEmpty()) {
            // No entity payload: log and surface a null result to the caller.
            logger.info("There was no matching entity. Table {}, partition key: {}, row key: {}.",
                tableName, partitionKey, rowKey);

            return null;
        }

        TableEntity entity = TableEntityAccessHelper.createEntity(matchingEntity);

        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            EntityHelper.convertToSubclass(entity, TableEntity.class, logger));
    };

    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may
* be used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the retrieved
* {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPolicies -->
* <pre>
* TableAccessPolicies accessPolicies = tableClient.getAccessPolicies&
*
* accessPolicies.getIdentifiers&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPolicies -->
*
* @return The table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableAccessPolicies getAccessPolicies() {
    // Delegates to the *WithResponse variant with no timeout and no context.
    return getAccessPoliciesWithResponse(null, null).getValue();
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may be
* used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the
* {@link Response HTTP response} and the retrieved {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableAccessPolicies> response = tableClient.getAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* + " IDs:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} containing the table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableAccessPolicies> getAccessPoliciesWithResponse(Duration timeout, Context context) {
    Context finalContext = TableUtils.setContext(context, true);

    Supplier<Response<TableAccessPolicies>> callable = () -> {
        ResponseBase<TablesGetAccessPolicyHeaders, List<SignedIdentifier>> response =
            tablesImplementation.getTables().getAccessPolicyWithResponse(tableName, null, null, finalContext);
        List<SignedIdentifier> identifiers = response.getValue();

        // Convert each service-model signed identifier into its public counterpart; a null list is
        // propagated as-is.
        List<TableSignedIdentifier> converted = identifiers == null
            ? null
            : identifiers.stream().map(TableUtils::toTableSignedIdentifier).collect(Collectors.toList());

        return new SimpleResponse<>(response, new TableAccessPolicies(converted));
    };

    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPolicies
* <pre>
* List<TableSignedIdentifier> signedIdentifiers = new ArrayList<>&
*
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* tableClient.setAccessPolicies&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPolicies
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessPolicies(List<TableSignedIdentifier> tableSignedIdentifiers) {
    // Delegates to the *WithResponse variant with no timeout and no context; the response is discarded.
    setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table. Prints out details of the
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
* <pre>
* List<TableSignedIdentifier> mySignedIdentifiers = new ArrayList<>&
*
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* Response<Void> response = tableClient.setAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessPoliciesWithResponse(List<TableSignedIdentifier> tableSignedIdentifiers,
                                                    Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);

    List<SignedIdentifier> signedIdentifiers = null;

    if (tableSignedIdentifiers != null) {
        // Convert each public identifier to its service model and normalize its time precision.
        signedIdentifiers = tableSignedIdentifiers.stream()
            .map(TableUtils::toSignedIdentifier)
            .map(TableClient::truncateAccessPolicyToSeconds)
            .collect(Collectors.toList());
    }

    List<SignedIdentifier> finalSignedIdentifiers = signedIdentifiers;

    Supplier<Response<Void>> callable = () -> {
        ResponseBase<TablesSetAccessPolicyHeaders, Void> response = tablesImplementation.getTables()
            .setAccessPolicyWithResponse(tableName, null, null,
                finalSignedIdentifiers, contextValue);
        return new SimpleResponse<>(response, response.getValue());
    };

    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}

/**
 * Truncates a signed identifier's access-policy start and expiry times to whole seconds.
 * Null-safe: a {@code null} identifier, a {@code null} policy, or {@code null} times are left
 * unchanged. Mutates and returns the same instance.
 *
 * @param signedIdentifier The identifier to normalize; may be {@code null}.
 *
 * @return The same {@code signedIdentifier} instance, possibly with truncated policy times.
 */
private static SignedIdentifier truncateAccessPolicyToSeconds(SignedIdentifier signedIdentifier) {
    if (signedIdentifier == null || signedIdentifier.getAccessPolicy() == null) {
        return signedIdentifier;
    }

    if (signedIdentifier.getAccessPolicy().getStart() != null) {
        signedIdentifier.getAccessPolicy()
            .setStart(signedIdentifier.getAccessPolicy().getStart().truncatedTo(ChronoUnit.SECONDS));
    }

    if (signedIdentifier.getAccessPolicy().getExpiry() != null) {
        signedIdentifier.getAccessPolicy()
            .setExpiry(signedIdentifier.getAccessPolicy().getExpiry().truncatedTo(ChronoUnit.SECONDS));
    }

    return signedIdentifier;
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
* {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of each {@link TableTransactionAction action}'s
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransaction
* <pre>
* List<TableTransactionAction> transactionActions = new ArrayList<>&
*
* String partitionKey = "markers";
* String firstEntityRowKey = "m001";
* String secondEntityRowKey = "m002";
*
* TableEntity firstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* firstEntityRowKey&
*
* TableEntity secondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* secondEntityRowKey&
*
* TableTransactionResult tableTransactionResult = tableClient.submitTransaction&
*
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransaction
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableAsyncClient.submitTransactionWithError
* <pre>
*
* tableAsyncClient.submitTransaction&
* .contextWrite&
* .doOnError&
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* transactionActions.remove&
* &
* &
* .subscribe&
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableAsyncClient.submitTransactionWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction actions} to perform on
* {@link TableEntity entities} in a table.
*
* @return A {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionResult action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionResult action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableTransactionResult submitTransaction(List<TableTransactionAction> transactionActions) {
    // Delegates to the *WithResponse variant with no timeout and no context.
    return submitTransactionWithResponse(transactionActions, null, null).getValue();
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
* {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of the {@link Response HTTP response} for the
* operation, as well as each {@link TableTransactionAction action}'s corresponding {@link Response HTTP
* response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponse
* <pre>
* List<TableTransactionAction> myTransactionActions = new ArrayList<>&
*
* String myPartitionKey = "markers";
* String myFirstEntityRowKey = "m001";
* String mySecondEntityRowKey = "m002";
*
* TableEntity myFirstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* myFirstEntityRowKey&
*
* TableEntity mySecondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* mySecondEntityRowKey&
*
* Response<TableTransactionResult> response = tableClient.submitTransactionWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " actions are:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponse
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
* <pre>
* try &
* Response<TableTransactionResult> transactionResultResponse =
* tableClient.submitTransactionWithResponse&
* new Context&
*
* System.out.printf&
* + " submitted actions are:", transactionResultResponse.getStatusCode&
*
* transactionResultResponse.getValue&
* .forEach&
* System.out.printf&
* &
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* myTransactionActions.remove&
* &
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction transaction actions} to perform on
* {@link TableEntity entities} in a table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} produced for the transaction itself. The response's value will contain
* a {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionAction action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionAction action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableTransactionResult> submitTransactionWithResponse(List<TableTransactionAction> transactionActions, Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    if (transactionActions.isEmpty()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("A transaction must contain at least one operation."));
    }
    // Translate each public transaction action into its internal batch-operation counterpart.
    final List<TransactionalBatchAction> operations = new ArrayList<>();
    for (TableTransactionAction transactionAction : transactionActions) {
        switch (transactionAction.getActionType()) {
            case CREATE:
                operations.add(new TransactionalBatchAction.CreateEntity(transactionAction.getEntity()));
                break;
            case UPSERT_MERGE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE));
                break;
            case UPSERT_REPLACE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE));
                break;
            case UPDATE_MERGE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE, transactionAction.getIfUnchanged()));
                break;
            case UPDATE_REPLACE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE, transactionAction.getIfUnchanged()));
                break;
            case DELETE:
                operations.add(
                    new TransactionalBatchAction.DeleteEntity(transactionAction.getEntity(),
                        transactionAction.getIfUnchanged()));
                break;
            default:
                // Unknown action types are silently skipped.
                break;
        }
    }
    Supplier<Response<TableTransactionResult>> callable = () -> {
        // Fold each prepared sub-request into a single transactional batch request body. The
        // sub-requests are prepared against the internal batch client (which does not send them).
        BiConsumer<TransactionalBatchRequestBody, RequestActionPair> accumulator = (body, pair) ->
            body.addChangeOperation(new TransactionalBatchSubRequest(pair.getAction(), pair.getRequest()));
        BiConsumer<TransactionalBatchRequestBody, TransactionalBatchRequestBody> combiner = (body1, body2) ->
            body2.getContents().forEach(req -> body1.addChangeOperation((TransactionalBatchSubRequest) req));
        TransactionalBatchRequestBody requestBody =
            operations.stream()
                .map(op -> new RequestActionPair(op.prepareRequest(transactionalBatchClient), op))
                .collect(TransactionalBatchRequestBody::new, accumulator, combiner);
        ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response =
            transactionalBatchImplementation
                .submitTransactionalBatchWithRestResponse(requestBody, null, contextValue);
        // parseResponse throws when any sub-response in the batch indicates failure.
        Response<List<TableTransactionActionResponse>> parsedResponse = parseResponse(requestBody, response);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            new TableTransactionResult(transactionActions, parsedResponse.getValue()));
    };
    try {
        // Timeout handling is inlined here (rather than using callWithOptionalTimeout like the other
        // methods) so the RuntimeException catch below can re-interpret service errors — presumably
        // into TableTransactionFailedException; confirm against TableUtils.interpretException.
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        // NOTE(review): the thread's interrupt flag is not restored on InterruptedException —
        // consider calling Thread.currentThread().interrupt() before rethrowing.
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError((RuntimeException) TableUtils.interpretException(ex));
    }
}
/**
 * Immutable pairing of a prepared {@link HttpRequest} with the {@link TransactionalBatchAction}
 * that produced it, used while accumulating the transactional batch request body.
 */
private static class RequestActionPair {
    private final HttpRequest httpRequest;
    private final TransactionalBatchAction batchAction;

    RequestActionPair(HttpRequest request, TransactionalBatchAction action) {
        this.httpRequest = request;
        this.batchAction = action;
    }

    public HttpRequest getRequest() {
        return httpRequest;
    }

    public TransactionalBatchAction getAction() {
        return batchAction;
    }
}
/**
 * Converts the raw batch response into per-action sub-responses, attaching each original
 * sub-request to its sub-response, and throws if any sub-response indicates a failure
 * (status code >= 400).
 */
private Response<List<TableTransactionActionResponse>> parseResponse(TransactionalBatchRequestBody requestBody,
    ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response) {
    TableServiceError error = null;
    String errorMessage = null;
    TransactionalBatchChangeSet changes = null;
    TransactionalBatchAction failedAction = null;
    Integer failedIndex = null;
    // When present, the change set is the first element of the request body's contents.
    if (requestBody.getContents().get(0) instanceof TransactionalBatchChangeSet) {
        changes = (TransactionalBatchChangeSet) requestBody.getContents().get(0);
    }
    for (int i = 0; i < response.getValue().length; i++) {
        TableTransactionActionResponse subResponse = response.getValue()[i];
        // Attach the i-th sub-request's HTTP request to the i-th sub-response.
        if (changes != null && changes.getContents().get(i) != null) {
            TableTransactionActionResponseAccessHelper.updateTableTransactionActionResponse(subResponse,
                changes.getContents().get(i).getHttpRequest());
        }
        // Only the first failure is captured; later failures are not inspected further.
        if (subResponse.getStatusCode() >= 400 && error == null && errorMessage == null) {
            if (subResponse.getValue() instanceof TableServiceError) {
                error = (TableServiceError) subResponse.getValue();
                // The error message is expected to start with "<failedIndex>:" — parse it to find
                // which action in the change set failed.
                if (changes != null && error.getOdataError() != null
                    && error.getOdataError().getMessage() != null
                    && error.getOdataError().getMessage().getValue() != null) {
                    String message = error.getOdataError().getMessage().getValue();
                    try {
                        failedIndex = Integer.parseInt(message.substring(0, message.indexOf(":")));
                        failedAction = changes.getContents().get(failedIndex).getOperation();
                    } catch (NumberFormatException e) {
                        // Best effort only: the message did not contain a parsable index, so the
                        // failed action simply remains unknown.
                    }
                }
            } else if (subResponse.getValue() instanceof String) {
                errorMessage = "The service returned the following data for the failed operation: "
                    + subResponse.getValue();
            } else {
                errorMessage =
                    "The service returned the following status code for the failed operation: "
                        + subResponse.getStatusCode();
            }
        }
    }
    if (error != null || errorMessage != null) {
        String message = "An action within the operation failed, the transaction has been rolled back.";
        if (failedAction != null) {
            message += " The failed operation was: " + failedAction;
        } else if (errorMessage != null) {
            message += " " + errorMessage;
        }
        // Wrapped in RuntimeException; callers unwrap/interpret it via TableUtils.interpretException.
        throw logger.logExceptionAsError(new RuntimeException(
            new TableTransactionFailedException(message, null, toTableServiceError(error), failedIndex)));
    } else {
        return new SimpleResponse<>(response, Arrays.asList(response.getValue()));
    }
}
} | class TableClient {
// Shared executor used to enforce optional client-side timeouts; torn down by a JVM shutdown hook
// (see TableUtils.getThreadPoolWithShutdownHook).
private static final ExecutorService THREAD_POOL = TableUtils.getThreadPoolWithShutdownHook();
private final ClientLogger logger = new ClientLogger(TableClient.class);
// Name of the table this client operates on.
private final String tableName;
// Generated implementation client for the Tables REST API.
private final AzureTableImpl tablesImplementation;
// Implementation client for transactional batch requests; null on the internal batch-preparation copy.
private final TransactionalBatchImpl transactionalBatchImplementation;
// Account name, derived from the first label of the endpoint host.
private final String accountName;
// Fully-qualified endpoint URL for this table.
private final String tableEndpoint;
private final HttpPipeline pipeline;
// Internal client copy used to prepare transactional batch sub-requests; null on that copy itself.
private final TableClient transactionalBatchClient;
/**
 * Creates a fully functional client for the given table. Validates the table name, derives the
 * account name and table endpoint from the service URL, and builds the implementation clients.
 */
TableClient(String tableName, HttpPipeline pipeline, String serviceUrl, TableServiceVersion serviceVersion,
            SerializerAdapter tablesSerializer, SerializerAdapter transactionalBatchSerializer) {
    // The try block also covers URI parsing and host handling, so any NPE/IAE raised there is
    // logged before being rethrown.
    try {
        if (tableName == null) {
            throw new NullPointerException("'tableName' must not be null to create TableClient.");
        }

        if (tableName.isEmpty()) {
            throw new IllegalArgumentException("'tableName' must not be empty to create a TableClient.");
        }

        URI endpointUri = URI.create(serviceUrl);

        // The account name is the first label of the endpoint host.
        this.accountName = endpointUri.getHost().split("\\.", 2)[0];
        this.tableEndpoint = endpointUri.resolve("/" + tableName).toString();

        logger.verbose("Table Service URI: {}", endpointUri);
    } catch (NullPointerException | IllegalArgumentException ex) {
        throw logger.logExceptionAsError(ex);
    }

    this.tablesImplementation = new AzureTableImplBuilder()
        .url(serviceUrl)
        .serializerAdapter(tablesSerializer)
        .pipeline(pipeline)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.transactionalBatchImplementation =
        new TransactionalBatchImpl(tablesImplementation, transactionalBatchSerializer);
    this.tableName = tableName;
    this.pipeline = tablesImplementation.getHttpPipeline();
    this.transactionalBatchClient = new TableClient(this, serviceVersion, tablesSerializer);
}
/**
 * Creates the internal copy of an existing client that is used to prepare transactional batch
 * sub-requests. This copy carries no batch implementation of its own.
 */
TableClient(TableClient client, ServiceVersion serviceVersion, SerializerAdapter tablesSerializer) {
    this.tableName = client.getTableName();
    this.accountName = client.getAccountName();
    this.tableEndpoint = client.getTableEndpoint();

    // NOTE(review): buildNullClientPipeline() appears to create a non-sending pipeline used only
    // to capture sub-requests — confirm against BuilderHelper.
    this.pipeline = BuilderHelper.buildNullClientPipeline();

    this.tablesImplementation = new AzureTableImplBuilder()
        .url(client.getTablesImplementation().getUrl())
        .serializerAdapter(tablesSerializer)
        .pipeline(this.pipeline)
        .version(serviceVersion.getVersion())
        .buildClient();

    this.transactionalBatchImplementation = null;
    this.transactionalBatchClient = null;
}
/**
 * Gets the name of the table this client operates on.
 *
 * @return The name of the table.
 */
public String getTableName() {
    return tableName;
}
/**
 * Gets the name of the account containing the table, as derived from the service endpoint.
 *
 * @return The name of the account containing the table.
 */
public String getAccountName() {
    return accountName;
}
/**
 * Gets the fully-qualified endpoint URL for this table.
 *
 * @return The endpoint for this table.
 */
public String getTableEndpoint() {
    return tableEndpoint;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return This client's {@link HttpPipeline}.
 */
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
/**
 * Gets the {@link AzureTableImpl} powering this client.
 *
 * @return This client's {@link AzureTableImpl}.
 */
AzureTableImpl getTablesImplementation() {
    return tablesImplementation;
}
/**
 * Gets the REST API version used by this client, resolved from the implementation client's
 * configured version string.
 *
 * @return The REST API version used by this client.
 */
public TableServiceVersion getServiceVersion() {
    return TableServiceVersion.fromString(tablesImplementation.getVersion());
}
/**
 * Generates a service SAS for the table using the specified {@link TableSasSignatureValues}.
 *
 * <p><strong>Note:</strong> The client must be authenticated via {@link AzureNamedKeyCredential}.</p>
 * <p>See {@link TableSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param tableSasSignatureValues {@link TableSasSignatureValues}.
 *
 * @return A {@code String} representing the SAS query parameters.
 *
 * @throws IllegalStateException If this {@link TableClient} is not authenticated with an
 * {@link AzureNamedKeyCredential}.
 */
public String generateSas(TableSasSignatureValues tableSasSignatureValues) {
    // SAS generation requires the shared key; locate it in the pipeline's credential policy.
    AzureNamedKeyCredential namedKeyCredential = TableSasUtils.extractNamedKeyCredential(getHttpPipeline());

    if (namedKeyCredential == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Cannot generate a SAS token with a client that"
            + " is not authenticated with an AzureNamedKeyCredential."));
    }

    return new TableSasGenerator(tableSasSignatureValues, getTableName(), namedKeyCredential).getSas();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTable -->
* <pre>
* TableItem tableItem = tableClient.createTable&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTable -->
*
* @return A {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableItem createTable() {
    // Delegates to the *WithResponse variant with no timeout and no context.
    return createTableWithResponse(null, null).getValue();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the {@link Response HTTP response} and the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTableWithResponse
* <pre>
* Response<TableItem> response = tableClient.createTableWithResponse&
* new Context&
*
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing a {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableItem> createTableWithResponse(Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
final TableProperties properties = new TableProperties().setTableName(tableName);
Supplier<Response<TableItem>> callable = () ->
new SimpleResponse<>(tablesImplementation.getTables().createWithResponse(properties, null,
ResponseFormat.RETURN_NO_CONTENT, null, contextValue),
TableItemAccessHelper.createItem(new TableResponseProperties().setTableName(tableName)));
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTable -->
* <pre>
* tableClient.deleteTable&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTable -->
*
* @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteTable() {
        // Convenience overload: delegates with no timeout and no caller-supplied context.
        deleteTableWithResponse(null, null);
    }
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table. Prints out the details of the {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTableWithResponse
* <pre>
* Response<Void> response = tableClient.deleteTableWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteTableWithResponse(Duration timeout, Context context) {
        Context contextValue = TableUtils.setContext(context, true);
        Supplier<Response<Void>> callable = () -> new SimpleResponse<>(tablesImplementation.getTables()
            .deleteWithResponse(tableName, null, contextValue), null);
        // Inlined timeout handling (rather than callWithOptionalTimeout) so that a 404 from the service can be
        // swallowed: deleting a table that no longer exists is treated as a successful no-op.
        try {
            return hasTimeout(timeout)
                ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
        } catch (InterruptedException | ExecutionException | TimeoutException ex) {
            throw logger.logExceptionAsError(new RuntimeException(ex));
        } catch (RuntimeException ex) {
            // Map pipeline exceptions to TableServiceException first so the 404 check below can inspect them.
            Throwable except = mapThrowableToTableServiceException(ex);
            return swallow404Exception(except);
        }
    }
private Response<Void> swallow404Exception(Throwable ex) {
if (ex instanceof TableServiceException
&& ((TableServiceException) ex).getResponse().getStatusCode() == 404) {
return new SimpleResponse<>(
((TableServiceException) ex).getResponse().getRequest(),
((TableServiceException) ex).getResponse().getStatusCode(),
((TableServiceException) ex).getResponse().getHeaders(),
null);
} else {
throw logger.logExceptionAsError((RuntimeException) (TableUtils.mapThrowableToTableServiceException(ex)));
}
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the created
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.createEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntity
*
* @param entity The {@link TableEntity entity} to insert.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void createEntity(TableEntity entity) {
        // Convenience overload: delegates with no timeout and no caller-supplied context.
        createEntityWithResponse(entity, null, null);
    }
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the
* {@link Response HTTP response} and the created {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntityWithResponse
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.createEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was created.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntityWithResponse
*
* @param entity The {@link TableEntity entity} to insert.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> createEntityWithResponse(TableEntity entity, Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
if (entity == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
}
EntityHelper.setPropertiesFromGetters(entity, logger);
Supplier<Response<Void>> callable = () -> {
Response<Map<String, Object>> response = tablesImplementation.getTables().insertEntityWithResponse(
tableName, null, null, ResponseFormat.RETURN_NO_CONTENT,
entity.getProperties(), null, contextValue);
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null);
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or merges the
* {@link TableEntity entity} with the existing {@link TableEntity entity} otherwise.
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table. Prints out the details of the upserted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.upsertEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntity
*
* @param entity The {@link TableEntity entity} to upsert.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void upsertEntity(TableEntity entity) {
        // Convenience overload: default update mode, no timeout, no caller-supplied context.
        upsertEntityWithResponse(entity, null, null, null);
    }
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or updates the existing
* {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode} otherwise. The default
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity
* entity}.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table with the specified
* {@link TableEntityUpdateMode update mode} if said {@link TableEntity entity} already exists. Prints out the
* details of the {@link Response HTTP response} and the upserted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntityWithResponse
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.upsertEntityWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntityWithResponse
*
* @param entity The {@link TableEntity entity} to upsert.
* @param updateMode The type of update to perform if the {@link TableEntity entity} already exits.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> upsertEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
if (entity == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
}
String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
EntityHelper.setPropertiesFromGetters(entity, logger);
Supplier<Response<Void>> callable = () -> {
if (updateMode == TableEntityUpdateMode.REPLACE) {
return tablesImplementation.getTables().updateEntityWithResponse(
tableName, partitionKey, rowKey, null, null, null,
entity.getProperties(), null, contextValue);
} else {
return tablesImplementation.getTables().mergeEntityWithResponse(
tableName, partitionKey, rowKey, null, null, null,
entity.getProperties(), null, contextValue);
}
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Updates an existing {@link TableEntity entity} by merging the provided {@link TableEntity entity} with the
* existing {@link TableEntity entity}.
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table. Prints out the details of the updated
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void updateEntity(TableEntity entity) {
        // Convenience overload: default update mode.
        updateEntity(entity, null);
    }
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified
* {@link TableEntityUpdateMode update mode}. Prints out the details of the updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void updateEntity(TableEntity entity, TableEntityUpdateMode updateMode) {
        // Unconditional update (ifUnchanged = false), no timeout, no caller-supplied context.
        updateEntityWithResponse(entity, updateMode, false, null, null);
    }
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified {@link TableEntityUpdateMode update
* mode}
* if the {@code ETags} on both {@link TableEntity entities} match. Prints out the details of the
* {@link Response HTTP response} updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.updateEntityWithResponse&
* true, Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntityWithResponse
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table, or if {@code ifUnchanged} is {@code true} and the existing {@link TableEntity entity}'s ETag
* does not match that of the provided {@link TableEntity entity}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> updateEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
boolean ifUnchanged, Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
if (entity == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
}
String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
String eTag = ifUnchanged ? entity.getETag() : "*";
EntityHelper.setPropertiesFromGetters(entity, logger);
Supplier<Response<Void>> callable = () -> {
if (updateMode == TableEntityUpdateMode.REPLACE) {
return tablesImplementation.getTables()
.updateEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
entity.getProperties(), null, contextValue);
} else {
return tablesImplementation.getTables()
.mergeEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
entity.getProperties(), null, contextValue);
}
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes an {@link TableEntity entity} on the table. Prints out the entity's {@code partitionKey} and
* {@code rowKey}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
*
     * @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
     * empty.
     * @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteEntity(String partitionKey, String rowKey) {
        // Unconditional delete (no ETag check), no timeout, no caller-supplied context.
        deleteEntityWithResponse(partitionKey, rowKey, null, false, null, null);
    }
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the deleted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param entity The {@link TableEntity entity} to delete.
*
* @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteEntity(TableEntity entity) {
        // Unconditional delete (ifUnchanged = false), no timeout, no caller-supplied context.
        deleteEntityWithResponse(entity, false, null, null);
    }
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the
* {@link Response HTTP response} and the deleted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.deleteEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was deleted.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntityWithResponse
*
* @param entity The table {@link TableEntity entity} to delete.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
* @throws IllegalArgumentException If the entity has null 'partitionKey' or 'rowKey'.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteEntityWithResponse(TableEntity entity, boolean ifUnchanged, Duration timeout,
Context context) {
return deleteEntityWithResponse(
entity.getPartitionKey(), entity.getRowKey(), entity.getETag(), ifUnchanged, timeout, context);
}
private Response<Void> deleteEntityWithResponse(String partitionKey, String rowKey, String eTag, boolean ifUnchanged,
Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
String finalETag = ifUnchanged ? eTag : "*";
if (partitionKey == null || rowKey == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null"));
}
Supplier<Response<Void>> callable = () -> tablesImplementation.getTables().deleteEntityWithResponse(
tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), finalETag,
null, null, null, contextValue);
try {
return hasTimeout(timeout)
? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
} catch (InterruptedException | ExecutionException | TimeoutException ex) {
throw logger.logExceptionAsError(new RuntimeException(ex));
} catch (RuntimeException ex) {
return swallow404Exception(mapThrowableToTableServiceException(ex));
}
}
/**
* Lists all {@link TableEntity entities} within the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities -->
* <pre>
* PagedIterable<TableEntity> tableEntities = tableClient.listEntities&
*
* tableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities -->
*
* @return A {@link PagedIterable} containing all {@link TableEntity entities} within the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<TableEntity> listEntities() {
        // Convenience overload: default options (no filter/select/top), no timeout, no caller-supplied context.
        return listEntities(new ListEntitiesOptions(), null, null);
    }
/**
* Lists {@link TableEntity entities} using the parameters in the provided options.
*
* <p>If the {@code filter} parameter in the options is set, only {@link TableEntity entities} matching the filter
* will be returned. If the {@code select} parameter is set, only the properties included in the select parameter
* will be returned for each {@link TableEntity entity}. If the {@code top} parameter is set, the maximum number of
* returned {@link TableEntity entities} per page will be limited to that value.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* {@link Response HTTP response} and all the retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* ListEntitiesOptions listEntitiesOptions = new ListEntitiesOptions&
* .setTop&
* .setFilter&
* .setSelect&
*
* PagedIterable<TableEntity> myTableEntities = tableClient.listEntities&
* Duration.ofSeconds&
*
* myTableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
*
* tableEntity.getProperties&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities
*
* @param options The {@code filter}, {@code select}, and {@code top} OData query options to apply to this
* operation.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return A {@link PagedIterable} containing matching {@link TableEntity entities} within the table.
*
* @throws IllegalArgumentException If one or more of the OData query options in {@code options} is malformed.
* @throws TableServiceException If the request is rejected by the service.
*/
    // NOTE(review): @ServiceMethod normally annotates public API methods; confirm it is intentional on this
    // private pagination helper.
    @ServiceMethod(returns = ReturnType.COLLECTION)
    private <T extends TableEntity> PagedResponse<T> listEntitiesFirstPage(Context context,
                                                                           ListEntitiesOptions options,
                                                                           Class<T> resultType) {
        // First page: no continuation keys yet.
        return listEntities(null, null, context, options, resultType);
    }
private <T extends TableEntity> PagedResponse<T> listEntitiesNextPage(String token, Context context,
ListEntitiesOptions options,
Class<T> resultType) {
if (token == null) {
return null;
}
try {
String[] keys = TableUtils.getKeysFromToken(token);
return listEntities(keys[0], keys[1], context, options, resultType);
} catch (RuntimeException ex) {
throw logger.logExceptionAsError(ex);
}
}
private <T extends TableEntity> PagedResponse<T> listEntities(String nextPartitionKey, String nextRowKey,
Context context, ListEntitiesOptions options,
Class<T> resultType) {
Context contextValue = TableUtils.setContext(context, true);
String select = null;
if (options.getSelect() != null) {
select = String.join(",", options.getSelect());
}
QueryOptions queryOptions = new QueryOptions()
.setFilter(options.getFilter())
.setTop(options.getTop())
.setSelect(select)
.setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);
final ResponseBase<TablesQueryEntitiesHeaders, TableEntityQueryResponse> response =
tablesImplementation.getTables().queryEntitiesWithResponse(tableName, null, null,
nextPartitionKey, nextRowKey, queryOptions, contextValue);
final TableEntityQueryResponse tablesQueryEntityResponse = response.getValue();
if (tablesQueryEntityResponse == null) {
return null;
}
final List<Map<String, Object>> entityResponseValue = tablesQueryEntityResponse.getValue();
if (entityResponseValue == null) {
return null;
}
final List<T> entities = entityResponseValue.stream()
.map(TableEntityAccessHelper::createEntity)
.map(e -> EntityHelper.convertToSubclass(e, resultType, logger))
.collect(Collectors.toList());
return new EntityPaged<>(response, entities,
response.getDeserializedHeaders().getXMsContinuationNextPartitionKey(),
response.getDeserializedHeaders().getXMsContinuationNextRowKey());
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the retrieved
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntity
* <pre>
* TableEntity tableEntity = tableClient.getEntity&
*
* System.out.printf&
* tableEntity.getRowKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
     * @param rowKey The row key of the {@link TableEntity entity}.
*
* @return The {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public TableEntity getEntity(String partitionKey, String rowKey) {
        // Convenience overload: no $select projection, no timeout, no caller-supplied context.
        return getEntityWithResponse(partitionKey, rowKey, null, null, null).getValue();
    }
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the {@link Response HTTP response}
* retrieved {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntityWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableEntity> response = tableClient.getEntityWithResponse&
* Duration.ofSeconds&
*
* TableEntity myTableEntity = response.getValue&
*
* System.out.printf&
* + " '%s' and properties:", response.getStatusCode&
* myTableEntity.getRowKey&
*
* myTableEntity.getProperties&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntityWithResponse
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
     * @param rowKey The row key of the {@link TableEntity entity}.
* @param select A list of properties to select on the {@link TableEntity entity}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null}
* or if the {@code select} OData query option is malformed.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableEntity> getEntityWithResponse(String partitionKey, String rowKey, List<String> select,
Duration timeout, Context context) {
Context contextValue = TableUtils.setContext(context, true);
QueryOptions queryOptions = new QueryOptions()
.setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);
if (select != null) {
queryOptions.setSelect(String.join(",", select));
}
if (partitionKey == null || rowKey == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null."));
}
Supplier<Response<TableEntity>> callable = () -> {
ResponseBase<TablesQueryEntityWithPartitionAndRowKeyHeaders, Map<String, Object>> response =
tablesImplementation.getTables().queryEntityWithPartitionAndRowKeyWithResponse(
tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), null,
null, queryOptions, contextValue);
final Map<String, Object> matchingEntity = response.getValue();
if (matchingEntity == null || matchingEntity.isEmpty()) {
logger.info("There was no matching entity. Table {}, partition key: {}, row key: {}.",
tableName, partitionKey, rowKey);
return null;
}
final TableEntity entity = TableEntityAccessHelper.createEntity(matchingEntity);
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
EntityHelper.convertToSubclass(entity, TableEntity.class, logger));
};
return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may
* be used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the retrieved
* {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPolicies -->
* <pre>
* TableAccessPolicies accessPolicies = tableClient.getAccessPolicies&
*
* accessPolicies.getIdentifiers&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPolicies -->
*
* @return The table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public TableAccessPolicies getAccessPolicies() {
        // Convenience overload: delegates with no timeout and no caller-supplied context.
        return getAccessPoliciesWithResponse(null, null).getValue();
    }
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may be
* used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the
* {@link Response HTTP response} and the retrieved {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableAccessPolicies> response = tableClient.getAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* + " IDs:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} containing the table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableAccessPolicies> getAccessPoliciesWithResponse(Duration timeout, Context context) {
    // Enrich the caller-supplied context before handing it to the generated client.
    final Context finalContext = TableUtils.setContext(context, true);
    Supplier<Response<TableAccessPolicies>> supplier = () -> {
        ResponseBase<TablesGetAccessPolicyHeaders, List<SignedIdentifier>> rawResponse =
            tablesImplementation.getTables().getAccessPolicyWithResponse(tableName, null, null, finalContext);
        // Convert the wire-format signed identifiers into the public model; a null body stays null.
        List<SignedIdentifier> identifiers = rawResponse.getValue();
        List<TableSignedIdentifier> converted = null;
        if (identifiers != null) {
            converted = identifiers.stream()
                .map(TableUtils::toTableSignedIdentifier)
                .collect(Collectors.toList());
        }
        return new SimpleResponse<>(rawResponse, new TableAccessPolicies(converted));
    };
    return callWithOptionalTimeout(supplier, THREAD_POOL, timeout, logger);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPolicies
* <pre>
* List<TableSignedIdentifier> signedIdentifiers = new ArrayList<>&
*
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* tableClient.setAccessPolicies&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPolicies
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessPolicies(List<TableSignedIdentifier> tableSignedIdentifiers) {
    // Delegate to the WithResponse variant, discarding the HTTP response.
    setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table. Prints out details of the
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
* <pre>
* List<TableSignedIdentifier> mySignedIdentifiers = new ArrayList<>&
*
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* Response<Void> response = tableClient.setAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessPoliciesWithResponse(List<TableSignedIdentifier> tableSignedIdentifiers,
    Duration timeout, Context context) {
    final Context finalContext = TableUtils.setContext(context, true);
    // Convert the public access-policy models into their wire format. A null input list stays null.
    List<SignedIdentifier> converted = null;
    if (tableSignedIdentifiers != null) {
        converted = new ArrayList<>(tableSignedIdentifiers.size());
        for (TableSignedIdentifier tableSignedIdentifier : tableSignedIdentifiers) {
            SignedIdentifier signedIdentifier = TableUtils.toSignedIdentifier(tableSignedIdentifier);
            if (signedIdentifier != null && signedIdentifier.getAccessPolicy() != null) {
                // Truncate start/expiry to whole seconds before sending them to the service.
                if (signedIdentifier.getAccessPolicy().getStart() != null) {
                    signedIdentifier.getAccessPolicy().setStart(
                        signedIdentifier.getAccessPolicy().getStart().truncatedTo(ChronoUnit.SECONDS));
                }
                if (signedIdentifier.getAccessPolicy().getExpiry() != null) {
                    signedIdentifier.getAccessPolicy().setExpiry(
                        signedIdentifier.getAccessPolicy().getExpiry().truncatedTo(ChronoUnit.SECONDS));
                }
            }
            converted.add(signedIdentifier);
        }
    }
    final List<SignedIdentifier> finalSignedIdentifiers = converted;
    Supplier<Response<Void>> supplier = () -> {
        ResponseBase<TablesSetAccessPolicyHeaders, Void> rawResponse = tablesImplementation.getTables()
            .setAccessPolicyWithResponse(tableName, null, null,
                finalSignedIdentifiers, finalContext);
        return new SimpleResponse<>(rawResponse, rawResponse.getValue());
    };
    return callWithOptionalTimeout(supplier, THREAD_POOL, timeout, logger);
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
 * {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of each {@link TableTransactionAction action}'s
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransaction
* <pre>
* List<TableTransactionAction> transactionActions = new ArrayList<>&
*
* String partitionKey = "markers";
* String firstEntityRowKey = "m001";
* String secondEntityRowKey = "m002";
*
* TableEntity firstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* firstEntityRowKey&
*
* TableEntity secondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* secondEntityRowKey&
*
* TableTransactionResult tableTransactionResult = tableClient.submitTransaction&
*
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransaction
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableAsyncClient.submitTransactionWithError
* <pre>
*
* tableAsyncClient.submitTransaction&
* .contextWrite&
* .doOnError&
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* transactionActions.remove&
* &
* &
* .subscribe&
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableAsyncClient.submitTransactionWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction actions} to perform on
* {@link TableEntity entities} in a table.
*
* @return A {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionResult action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionResult action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableTransactionResult submitTransaction(List<TableTransactionAction> transactionActions) {
    // Delegate to the WithResponse variant with no timeout or context, then unwrap the value.
    Response<TableTransactionResult> response = submitTransactionWithResponse(transactionActions, null, null);
    return response.getValue();
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
 * {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of the {@link Response HTTP response} for the
* operation, as well as each {@link TableTransactionAction action}'s corresponding {@link Response HTTP
* response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponse
* <pre>
* List<TableTransactionAction> myTransactionActions = new ArrayList<>&
*
* String myPartitionKey = "markers";
* String myFirstEntityRowKey = "m001";
* String mySecondEntityRowKey = "m002";
*
* TableEntity myFirstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* myFirstEntityRowKey&
*
* TableEntity mySecondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* mySecondEntityRowKey&
*
* Response<TableTransactionResult> response = tableClient.submitTransactionWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " actions are:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponse
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
* <pre>
* try &
* Response<TableTransactionResult> transactionResultResponse =
* tableClient.submitTransactionWithResponse&
* new Context&
*
* System.out.printf&
* + " submitted actions are:", transactionResultResponse.getStatusCode&
*
* transactionResultResponse.getValue&
* .forEach&
* System.out.printf&
* &
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* myTransactionActions.remove&
* &
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction transaction actions} to perform on
* {@link TableEntity entities} in a table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} produced for the transaction itself. The response's value will contain
* a {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionAction action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionAction action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableTransactionResult> submitTransactionWithResponse(List<TableTransactionAction> transactionActions, Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // An empty transaction is a caller error; reject it before building any request.
    if (transactionActions.isEmpty()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("A transaction must contain at least one operation."));
    }
    // Map each public action onto the internal batch-action type it corresponds to.
    final List<TransactionalBatchAction> operations = new ArrayList<>();
    for (TableTransactionAction transactionAction : transactionActions) {
        switch (transactionAction.getActionType()) {
            case CREATE:
                operations.add(new TransactionalBatchAction.CreateEntity(transactionAction.getEntity()));
                break;
            case UPSERT_MERGE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE));
                break;
            case UPSERT_REPLACE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE));
                break;
            case UPDATE_MERGE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE, transactionAction.getIfUnchanged()));
                break;
            case UPDATE_REPLACE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE, transactionAction.getIfUnchanged()));
                break;
            case DELETE:
                operations.add(
                    new TransactionalBatchAction.DeleteEntity(transactionAction.getEntity(),
                        transactionAction.getIfUnchanged()));
                break;
            default:
                // NOTE(review): unknown action types are silently dropped — presumably the enum is
                // closed so this branch is unreachable; confirm before relying on it.
                break;
        }
    }
    Supplier<Response<TableTransactionResult>> callable = () -> {
        // Fold every prepared sub-request into a single multipart batch body. The accumulator adds one
        // sub-request; the combiner merges partial bodies (only relevant for parallel streams).
        BiConsumer<TransactionalBatchRequestBody, RequestActionPair> accumulator = (body, pair) ->
            body.addChangeOperation(new TransactionalBatchSubRequest(pair.getAction(), pair.getRequest()));
        BiConsumer<TransactionalBatchRequestBody, TransactionalBatchRequestBody> combiner = (body1, body2) ->
            body2.getContents().forEach(req -> body1.addChangeOperation((TransactionalBatchSubRequest) req));
        TransactionalBatchRequestBody requestBody =
            operations.stream()
                .map(op -> new RequestActionPair(op.prepareRequest(transactionalBatchClient), op))
                .collect(TransactionalBatchRequestBody::new, accumulator, combiner);
        ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response =
            transactionalBatchImplementation
                .submitTransactionalBatchWithRestResponse(requestBody, null, contextValue);
        // parseResponse correlates sub-responses with their requests and throws if any action failed.
        Response<List<TableTransactionActionResponse>> parsedResponse = parseResponse(requestBody, response);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            new TableTransactionResult(transactionActions, parsedResponse.getValue()));
    };
    // Timeout handling is inlined (instead of callWithOptionalTimeout) so failures can be routed
    // through TableUtils.interpretException below.
    try {
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError((RuntimeException) TableUtils.interpretException(ex));
    }
}
/**
 * Immutable pairing of a prepared {@link HttpRequest} with the {@link TransactionalBatchAction} that
 * produced it, used while assembling a transactional batch body.
 */
private static class RequestActionPair {
    private final HttpRequest request;
    private final TransactionalBatchAction action;

    RequestActionPair(HttpRequest request, TransactionalBatchAction action) {
        this.request = request;
        this.action = action;
    }

    /** Returns the batch action this pair was built from. */
    public TransactionalBatchAction getAction() {
        return this.action;
    }

    /** Returns the HTTP request prepared for the action. */
    public HttpRequest getRequest() {
        return this.request;
    }
}
/**
 * Correlates each transactional batch sub-response with the request that produced it and detects the
 * first failed action, raising a {@link TableTransactionFailedException} (wrapped in a
 * {@link RuntimeException}) when any sub-response has a status code of 400 or above.
 *
 * @param requestBody The batch request body that was submitted.
 * @param response The raw batch response returned by the service.
 *
 * @return An HTTP response whose value is the list of per-action sub-responses.
 */
private Response<List<TableTransactionActionResponse>> parseResponse(TransactionalBatchRequestBody requestBody,
    ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response) {
    TableServiceError error = null;
    String errorMessage = null;
    TransactionalBatchChangeSet changes = null;
    TransactionalBatchAction failedAction = null;
    Integer failedIndex = null;
    if (requestBody.getContents().get(0) instanceof TransactionalBatchChangeSet) {
        changes = (TransactionalBatchChangeSet) requestBody.getContents().get(0);
    }
    for (int i = 0; i < response.getValue().length; i++) {
        TableTransactionActionResponse subResponse = response.getValue()[i];
        // Attach the originating HTTP request to each sub-response so callers can correlate them.
        if (changes != null && changes.getContents().get(i) != null) {
            TableTransactionActionResponseAccessHelper.updateTableTransactionActionResponse(subResponse,
                changes.getContents().get(i).getHttpRequest());
        }
        // Only the first failure is recorded; later failures are ignored.
        if (subResponse.getStatusCode() >= 400 && error == null && errorMessage == null) {
            if (subResponse.getValue() instanceof TableServiceError) {
                error = (TableServiceError) subResponse.getValue();
                if (changes != null && error.getOdataError() != null
                    && error.getOdataError().getMessage() != null
                    && error.getOdataError().getMessage().getValue() != null) {
                    // Best effort: the service message is expected to start with the zero-based index
                    // of the failed action followed by ':'. Previously a message without ':' made
                    // substring(0, -1) throw an uncaught StringIndexOutOfBoundsException; guard the
                    // delimiter lookup and also tolerate an index outside the change set's bounds.
                    String message = error.getOdataError().getMessage().getValue();
                    int delimiterIndex = message.indexOf(":");
                    if (delimiterIndex != -1) {
                        try {
                            failedIndex = Integer.parseInt(message.substring(0, delimiterIndex));
                            failedAction = changes.getContents().get(failedIndex).getOperation();
                        } catch (NumberFormatException | IndexOutOfBoundsException e) {
                            // The prefix was not a usable index; report the failure without one.
                            failedIndex = null;
                            failedAction = null;
                        }
                    }
                }
            } else if (subResponse.getValue() instanceof String) {
                errorMessage = "The service returned the following data for the failed operation: "
                    + subResponse.getValue();
            } else {
                errorMessage =
                    "The service returned the following status code for the failed operation: "
                        + subResponse.getStatusCode();
            }
        }
    }
    if (error != null || errorMessage != null) {
        String message = "An action within the operation failed, the transaction has been rolled back.";
        if (failedAction != null) {
            message += " The failed operation was: " + failedAction;
        } else if (errorMessage != null) {
            message += " " + errorMessage;
        }
        throw logger.logExceptionAsError(new RuntimeException(
            new TableTransactionFailedException(message, null, toTableServiceError(error), failedIndex)));
    } else {
        return new SimpleResponse<>(response, Arrays.asList(response.getValue()));
    }
}
} |
@alzimmermsft From my testing I think you're right about this. This should be addressed in a separate PR. If you haven't already, I can file an issue for Tables to address this. | public PagedIterable<TableEntity> listEntities(ListEntitiesOptions options, Duration timeout, Context context) {
Supplier<PagedIterable<TableEntity>> callable = () -> new PagedIterable<>(
() -> listEntitiesFirstPage(context, options, TableEntity.class),
token -> listEntitiesNextPage(token, context, options, TableEntity.class));
return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
} | return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); | public PagedIterable<TableEntity> listEntities(ListEntitiesOptions options, Duration timeout, Context context) {
Supplier<PagedIterable<TableEntity>> callable = () -> new PagedIterable<>(
() -> listEntitiesFirstPage(context, options, TableEntity.class),
token -> listEntitiesNextPage(token, context, options, TableEntity.class));
return callIterableWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
} | class TableClient {
// Shared executor used to enforce optional per-call timeouts; shut down via a JVM shutdown hook.
private static final ExecutorService THREAD_POOL = TableUtils.getThreadPoolWithShutdownHook();
private final ClientLogger logger = new ClientLogger(TableClient.class);
// Name of the table this client targets.
private final String tableName;
// Generated implementation client for the Tables REST API.
private final AzureTableImpl tablesImplementation;
// Implementation client for transactional batch submission; null for the internal batch sub-client.
private final TransactionalBatchImpl transactionalBatchImplementation;
// Storage/Cosmos account name, parsed from the service URL's host.
private final String accountName;
// Fully qualified endpoint for this table.
private final String tableEndpoint;
private final HttpPipeline pipeline;
// Sub-client used to prepare the individual requests of a transactional batch; null on that sub-client itself.
private final TableClient transactionalBatchClient;
/**
 * Creates a {@link TableClient} bound to the given table.
 *
 * @param tableName Name of the table; must be non-null and non-empty.
 * @param pipeline HTTP pipeline used by the generated implementation client.
 * @param serviceUrl Base URL of the Tables service endpoint.
 * @param serviceVersion Service API version to send on requests.
 * @param tablesSerializer Serializer for the Tables REST API.
 * @param transactionalBatchSerializer Serializer for transactional batch payloads.
 */
TableClient(String tableName, HttpPipeline pipeline, String serviceUrl, TableServiceVersion serviceVersion,
    SerializerAdapter tablesSerializer, SerializerAdapter transactionalBatchSerializer) {
    // Validation failures are funneled through the logger before propagating.
    try {
        if (tableName == null) {
            throw new NullPointerException(("'tableName' must not be null to create TableClient."));
        }
        if (tableName.isEmpty()) {
            throw new IllegalArgumentException("'tableName' must not be empty to create a TableClient.");
        }
        final URI uri = URI.create(serviceUrl);
        // Account name is the first dot-separated label of the host, e.g. "acct" in acct.table.core.windows.net.
        this.accountName = uri.getHost().split("\\.", 2)[0];
        this.tableEndpoint = uri.resolve("/" + tableName).toString();
        logger.verbose("Table Service URI: {}", uri);
    } catch (NullPointerException | IllegalArgumentException ex) {
        throw logger.logExceptionAsError(ex);
    }
    this.tablesImplementation = new AzureTableImplBuilder()
        .url(serviceUrl)
        .serializerAdapter(tablesSerializer)
        .pipeline(pipeline)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.transactionalBatchImplementation =
        new TransactionalBatchImpl(tablesImplementation, transactionalBatchSerializer);
    this.tableName = tableName;
    // Keep the pipeline actually used by the implementation client, not the raw parameter.
    this.pipeline = tablesImplementation.getHttpPipeline();
    // Secondary client used to prepare (not send) the sub-requests of a transactional batch.
    this.transactionalBatchClient = new TableClient(this, serviceVersion, tablesSerializer);
}
/**
 * Creates the internal transactional-batch sub-client from an existing client. Uses a pipeline that
 * does not send requests over the wire (presumably only request preparation — see
 * {@code BuilderHelper.buildNullClientPipeline}; confirm against that helper), and leaves the batch
 * fields null so this sub-client cannot itself submit transactions.
 */
TableClient(TableClient client, ServiceVersion serviceVersion, SerializerAdapter tablesSerializer) {
    this.accountName = client.getAccountName();
    this.tableEndpoint = client.getTableEndpoint();
    this.pipeline = BuilderHelper.buildNullClientPipeline();
    this.tablesImplementation = new AzureTableImplBuilder()
        .url(client.getTablesImplementation().getUrl())
        .serializerAdapter(tablesSerializer)
        .pipeline(this.pipeline)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.tableName = client.getTableName();
    this.transactionalBatchImplementation = null;
    this.transactionalBatchClient = null;
}
/**
* Gets the name of the table.
*
* @return The name of the table.
*/
public String getTableName() {
    return this.tableName;
}
/**
* Gets the name of the account containing the table.
*
* @return The name of the account containing the table.
*/
public String getAccountName() {
    return this.accountName;
}
/**
* Gets the endpoint for this table.
*
* @return The endpoint for this table.
*/
public String getTableEndpoint() {
    return this.tableEndpoint;
}
/**
 * Gets the {@link HttpPipeline} backing this client.
 *
 * @return This client's {@link HttpPipeline}.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
* Gets the {@link AzureTableImpl} powering this client.
*
* @return This client's {@link AzureTableImpl}.
*/
AzureTableImpl getTablesImplementation() {
    return this.tablesImplementation;
}
/**
* Gets the REST API version used by this client.
*
* @return The REST API version used by this client.
*/
public TableServiceVersion getServiceVersion() {
    // Resolve the version string reported by the implementation client back to the enum value.
    String version = tablesImplementation.getVersion();
    return TableServiceVersion.fromString(version);
}
/**
* Generates a service SAS for the table using the specified {@link TableSasSignatureValues}.
*
* <p><strong>Note:</strong> The client must be authenticated via {@link AzureNamedKeyCredential}.</p>
* <p>See {@link TableSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* @param tableSasSignatureValues {@link TableSasSignatureValues}.
*
* @return A {@code String} representing the SAS query parameters.
*
* @throws IllegalStateException If this {@link TableClient} is not authenticated with an
* {@link AzureNamedKeyCredential}.
*/
public String generateSas(TableSasSignatureValues tableSasSignatureValues) {
    // SAS generation requires the named-key credential this client was authenticated with.
    AzureNamedKeyCredential credential = TableSasUtils.extractNamedKeyCredential(getHttpPipeline());
    if (credential == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Cannot generate a SAS token with a client that"
            + " is not authenticated with an AzureNamedKeyCredential."));
    }
    TableSasGenerator generator = new TableSasGenerator(tableSasSignatureValues, getTableName(), credential);
    return generator.getSas();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTable -->
* <pre>
* TableItem tableItem = tableClient.createTable&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTable -->
*
* @return A {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableItem createTable() {
    // Delegate to the WithResponse variant with no timeout or context, then unwrap the value.
    Response<TableItem> response = createTableWithResponse(null, null);
    return response.getValue();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the {@link Response HTTP response} and the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTableWithResponse
* <pre>
* Response<TableItem> response = tableClient.createTableWithResponse&
* new Context&
*
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing a {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableItem> createTableWithResponse(Duration timeout, Context context) {
    final Context finalContext = TableUtils.setContext(context, true);
    final TableProperties tableProperties = new TableProperties().setTableName(tableName);
    Supplier<Response<TableItem>> supplier = () -> {
        // Issue the create call first, then wrap a locally built TableItem as the response value.
        Response<?> rawResponse = tablesImplementation.getTables().createWithResponse(tableProperties, null,
            ResponseFormat.RETURN_NO_CONTENT, null, finalContext);
        return new SimpleResponse<>(rawResponse,
            TableItemAccessHelper.createItem(new TableResponseProperties().setTableName(tableName)));
    };
    return callWithOptionalTimeout(supplier, THREAD_POOL, timeout, logger);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTable -->
* <pre>
* tableClient.deleteTable&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTable -->
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteTable() {
    // Delegate to the WithResponse variant, discarding the HTTP response.
    deleteTableWithResponse(null, null);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table. Prints out the details of the {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTableWithResponse
* <pre>
* Response<Void> response = tableClient.deleteTableWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteTableWithResponse(Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    Supplier<Response<Void>> callable = () -> new SimpleResponse<>(tablesImplementation.getTables()
        .deleteWithResponse(tableName, null, contextValue), null);
    // Timeout handling is inlined (rather than using callWithOptionalTimeout) so that a 404 from the
    // service can be swallowed below: deleting a table that no longer exists is treated as success.
    try {
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        // Qualified for consistency with swallow404Exception, which calls the same TableUtils helper.
        Throwable except = TableUtils.mapThrowableToTableServiceException(ex);
        return swallow404Exception(except);
    }
}
/**
 * Converts a 404 {@link TableServiceException} into a successful empty response; rethrows anything else
 * (mapped through {@code TableUtils.mapThrowableToTableServiceException}) via the logger.
 */
private Response<Void> swallow404Exception(Throwable ex) {
    if (ex instanceof TableServiceException) {
        TableServiceException serviceException = (TableServiceException) ex;
        if (serviceException.getResponse().getStatusCode() == 404) {
            return new SimpleResponse<>(
                serviceException.getResponse().getRequest(),
                serviceException.getResponse().getStatusCode(),
                serviceException.getResponse().getHeaders(),
                null);
        }
    }
    throw logger.logExceptionAsError((RuntimeException) (TableUtils.mapThrowableToTableServiceException(ex)));
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the created
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.createEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntity
*
* @param entity The {@link TableEntity entity} to insert.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void createEntity(TableEntity entity) {
    // Delegate to the WithResponse variant, discarding the HTTP response.
    createEntityWithResponse(entity, null, null);
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the
* {@link Response HTTP response} and the created {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntityWithResponse
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.createEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was created.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntityWithResponse
*
* @param entity The {@link TableEntity entity} to insert.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> createEntityWithResponse(TableEntity entity, Duration timeout, Context context) {
    final Context finalContext = TableUtils.setContext(context, true);
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    // Sync the entity's property map from its getters before serialization.
    EntityHelper.setPropertiesFromGetters(entity, logger);
    Supplier<Response<Void>> supplier = () -> {
        Response<Map<String, Object>> rawResponse = tablesImplementation.getTables().insertEntityWithResponse(
            tableName, null, null, ResponseFormat.RETURN_NO_CONTENT,
            entity.getProperties(), null, finalContext);
        // Only status, headers, and request are surfaced; the body is dropped.
        return new SimpleResponse<>(rawResponse.getRequest(), rawResponse.getStatusCode(),
            rawResponse.getHeaders(), null);
    };
    return callWithOptionalTimeout(supplier, THREAD_POOL, timeout, logger);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or merges the
* {@link TableEntity entity} with the existing {@link TableEntity entity} otherwise.
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table. Prints out the details of the upserted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.upsertEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntity
*
* @param entity The {@link TableEntity entity} to upsert.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void upsertEntity(TableEntity entity) {
    // Delegate to the WithResponse variant with default update mode, discarding the HTTP response.
    upsertEntityWithResponse(entity, null, null, null);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or updates the existing
* {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode} otherwise. The default
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity
* entity}.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table with the specified
* {@link TableEntityUpdateMode update mode} if said {@link TableEntity entity} already exists. Prints out the
* details of the {@link Response HTTP response} and the upserted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntityWithResponse
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.upsertEntityWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntityWithResponse
*
* @param entity The {@link TableEntity entity} to upsert.
* @param updateMode The type of update to perform if the {@link TableEntity entity} already exits.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> upsertEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
    Duration timeout, Context context) {
    // Fail fast on invalid input before computing the request context or escaping keys.
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    Context contextValue = TableUtils.setContext(context, true);
    // Single quotes in keys must be escaped for the URL path segments of the request.
    String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
    String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
    EntityHelper.setPropertiesFromGetters(entity, logger);
    Supplier<Response<Void>> callable = () -> {
        // REPLACE overwrites the stored entity entirely; any other mode (including null) merges.
        if (updateMode == TableEntityUpdateMode.REPLACE) {
            return tablesImplementation.getTables().updateEntityWithResponse(
                tableName, partitionKey, rowKey, null, null, null,
                entity.getProperties(), null, contextValue);
        } else {
            return tablesImplementation.getTables().mergeEntityWithResponse(
                tableName, partitionKey, rowKey, null, null, null,
                entity.getProperties(), null, contextValue);
        }
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Updates an existing {@link TableEntity entity} by merging the provided {@link TableEntity entity} with the
* existing {@link TableEntity entity}.
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table. Prints out the details of the updated
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void updateEntity(TableEntity entity) {
    // A null update mode falls through to the overload's default behavior.
    updateEntity(entity, null);
}
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified
* {@link TableEntityUpdateMode update mode}. Prints out the details of the updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void updateEntity(TableEntity entity, TableEntityUpdateMode updateMode) {
    // ifUnchanged=false: unconditional update, the entity's ETag is not matched against the service copy.
    updateEntityWithResponse(entity, updateMode, false, null, null);
}
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified {@link TableEntityUpdateMode update
* mode}
* if the {@code ETags} on both {@link TableEntity entities} match. Prints out the details of the
* {@link Response HTTP response} updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.updateEntityWithResponse&
* true, Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntityWithResponse
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table, or if {@code ifUnchanged} is {@code true} and the existing {@link TableEntity entity}'s ETag
* does not match that of the provided {@link TableEntity entity}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> updateEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
    boolean ifUnchanged, Duration timeout, Context context) {
    // Fail fast on invalid input before computing the request context or escaping keys.
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    Context contextValue = TableUtils.setContext(context, true);
    // Single quotes in keys must be escaped for the URL path segments of the request.
    String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
    String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
    // "*" matches any ETag, making the update unconditional when ifUnchanged is false.
    String eTag = ifUnchanged ? entity.getETag() : "*";
    EntityHelper.setPropertiesFromGetters(entity, logger);
    Supplier<Response<Void>> callable = () -> {
        // REPLACE overwrites the stored entity entirely; any other mode (including null) merges.
        if (updateMode == TableEntityUpdateMode.REPLACE) {
            return tablesImplementation.getTables()
                .updateEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
                    entity.getProperties(), null, contextValue);
        } else {
            return tablesImplementation.getTables()
                .mergeEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
                    entity.getProperties(), null, contextValue);
        }
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes an {@link TableEntity entity} on the table. Prints out the entity's {@code partitionKey} and
* {@code rowKey}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If the request is rejected by the service.
* @throws IllegalArgumentException If 'partitionKey' or 'rowKey' is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteEntity(String partitionKey, String rowKey) {
    // Null ETag + ifUnchanged=false yields an unconditional delete; no timeout or context supplied.
    deleteEntityWithResponse(partitionKey, rowKey, null, false, null, null);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the deleted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param entity The {@link TableEntity entity} to delete.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteEntity(TableEntity entity) {
    // ifUnchanged=false: unconditional delete, the entity's ETag is not matched against the service copy.
    deleteEntityWithResponse(entity, false, null, null);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the
* {@link Response HTTP response} and the deleted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.deleteEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was deleted.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntityWithResponse
*
* @param entity The table {@link TableEntity entity} to delete.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
* @throws IllegalArgumentException If the entity has null 'partitionKey' or 'rowKey'.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteEntityWithResponse(TableEntity entity, boolean ifUnchanged, Duration timeout,
    Context context) {
    // Validate here so a null entity surfaces as a logged IllegalArgumentException (consistent with
    // createEntityWithResponse/upsertEntityWithResponse) rather than a raw NullPointerException
    // from the accessor calls below.
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    return deleteEntityWithResponse(
        entity.getPartitionKey(), entity.getRowKey(), entity.getETag(), ifUnchanged, timeout, context);
}
// Shared delete implementation. Swallows 404s so that deleting a non-existent entity is a no-op success.
private Response<Void> deleteEntityWithResponse(String partitionKey, String rowKey, String eTag, boolean ifUnchanged,
    Duration timeout, Context context) {
    // Fail fast before computing the request context or submitting work to the thread pool.
    if (partitionKey == null || rowKey == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null"));
    }
    Context contextValue = TableUtils.setContext(context, true);
    // "*" matches any ETag, i.e. an unconditional delete.
    String finalETag = ifUnchanged ? eTag : "*";
    Supplier<Response<Void>> callable = () -> tablesImplementation.getTables().deleteEntityWithResponse(
        tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), finalETag,
        null, null, null, contextValue);
    try {
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        // Map to TableServiceException and treat 404 (entity already gone) as success.
        return swallow404Exception(mapThrowableToTableServiceException(ex));
    }
}
/**
* Lists all {@link TableEntity entities} within the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities -->
* <pre>
* PagedIterable<TableEntity> tableEntities = tableClient.listEntities&
*
* tableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities -->
*
* @return A {@link PagedIterable} containing all {@link TableEntity entities} within the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<TableEntity> listEntities() {
    // Default options: no filter, no select projection, no page-size cap; no timeout or context.
    return listEntities(new ListEntitiesOptions(), null, null);
}
/**
* Lists {@link TableEntity entities} using the parameters in the provided options.
*
* <p>If the {@code filter} parameter in the options is set, only {@link TableEntity entities} matching the filter
* will be returned. If the {@code select} parameter is set, only the properties included in the select parameter
* will be returned for each {@link TableEntity entity}. If the {@code top} parameter is set, the maximum number of
* returned {@link TableEntity entities} per page will be limited to that value.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* {@link Response HTTP response} and all the retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* ListEntitiesOptions listEntitiesOptions = new ListEntitiesOptions&
* .setTop&
* .setFilter&
* .setSelect&
*
* PagedIterable<TableEntity> myTableEntities = tableClient.listEntities&
* Duration.ofSeconds&
*
* myTableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
*
* tableEntity.getProperties&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities
*
* @param options The {@code filter}, {@code select}, and {@code top} OData query options to apply to this
* operation.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return A {@link PagedIterable} containing matching {@link TableEntity entities} within the table.
*
* @throws IllegalArgumentException If one or more of the OData query options in {@code options} is malformed.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
private <T extends TableEntity> PagedResponse<T> listEntitiesFirstPage(Context context,
                                                                       ListEntitiesOptions options,
                                                                       Class<T> resultType) {
    // Null continuation keys make the service return the first page of results.
    return listEntities(null, null, context, options, resultType);
}
// Fetches the page identified by a continuation token, or returns null when paging is exhausted.
private <T extends TableEntity> PagedResponse<T> listEntitiesNextPage(String token, Context context,
                                                                      ListEntitiesOptions options,
                                                                      Class<T> resultType) {
    // A null continuation token means there are no further pages.
    if (token == null) {
        return null;
    }
    try {
        // The token encodes both continuation keys; index 0 is the partition key, index 1 the row key.
        final String[] continuationKeys = TableUtils.getKeysFromToken(token);
        final String nextPartitionKey = continuationKeys[0];
        final String nextRowKey = continuationKeys[1];
        return listEntities(nextPartitionKey, nextRowKey, context, options, resultType);
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError(ex);
    }
}
// Shared query implementation backing both first-page and continuation-page retrieval.
// Returns null when the service response carries no entity payload.
private <T extends TableEntity> PagedResponse<T> listEntities(String nextPartitionKey, String nextRowKey,
                                                              Context context, ListEntitiesOptions options,
                                                              Class<T> resultType) {
    Context contextValue = TableUtils.setContext(context, true);
    // Collapse the selected-property list into the comma-separated form the service expects.
    final String select = options.getSelect() == null ? null : String.join(",", options.getSelect());
    final QueryOptions queryOptions = new QueryOptions()
        .setFilter(options.getFilter())
        .setTop(options.getTop())
        .setSelect(select)
        .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);
    final ResponseBase<TablesQueryEntitiesHeaders, TableEntityQueryResponse> response =
        tablesImplementation.getTables().queryEntitiesWithResponse(tableName, null, null,
            nextPartitionKey, nextRowKey, queryOptions, contextValue);
    final TableEntityQueryResponse queryResponse = response.getValue();
    // No body or no entity list — nothing to page over.
    if (queryResponse == null || queryResponse.getValue() == null) {
        return null;
    }
    // Convert each raw property map into the requested TableEntity subclass.
    final List<T> entities = queryResponse.getValue().stream()
        .map(TableEntityAccessHelper::createEntity)
        .map(entity -> EntityHelper.convertToSubclass(entity, resultType, logger))
        .collect(Collectors.toList());
    return new EntityPaged<>(response, entities,
        response.getDeserializedHeaders().getXMsContinuationNextPartitionKey(),
        response.getDeserializedHeaders().getXMsContinuationNextRowKey());
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the retrieved
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntity
* <pre>
* TableEntity tableEntity = tableClient.getEntity&
*
* System.out.printf&
* tableEntity.getRowKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
 * @param rowKey The row key of the {@link TableEntity entity}.
*
* @return The {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableEntity getEntity(String partitionKey, String rowKey) {
    // Null select: all properties are returned; no timeout or caller-supplied context.
    return getEntityWithResponse(partitionKey, rowKey, null, null, null).getValue();
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the {@link Response HTTP response}
* retrieved {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntityWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableEntity> response = tableClient.getEntityWithResponse&
* Duration.ofSeconds&
*
* TableEntity myTableEntity = response.getValue&
*
* System.out.printf&
* + " '%s' and properties:", response.getStatusCode&
* myTableEntity.getRowKey&
*
* myTableEntity.getProperties&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntityWithResponse
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
 * @param rowKey The row key of the {@link TableEntity entity}.
* @param select A list of properties to select on the {@link TableEntity entity}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null}
* or if the {@code select} OData query option is malformed.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableEntity> getEntityWithResponse(String partitionKey, String rowKey, List<String> select,
    Duration timeout, Context context) {
    // Fail fast on invalid input before building the query options or request context.
    if (partitionKey == null || rowKey == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null."));
    }
    Context contextValue = TableUtils.setContext(context, true);
    QueryOptions queryOptions = new QueryOptions()
        .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);
    if (select != null) {
        // Collapse the selected-property list into the comma-separated form the service expects.
        queryOptions.setSelect(String.join(",", select));
    }
    Supplier<Response<TableEntity>> callable = () -> {
        ResponseBase<TablesQueryEntityWithPartitionAndRowKeyHeaders, Map<String, Object>> response =
            tablesImplementation.getTables().queryEntityWithPartitionAndRowKeyWithResponse(
                tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), null,
                null, queryOptions, contextValue);
        final Map<String, Object> matchingEntity = response.getValue();
        // An empty/absent body means no matching entity; log and return null rather than throwing.
        if (matchingEntity == null || matchingEntity.isEmpty()) {
            logger.info("There was no matching entity. Table {}, partition key: {}, row key: {}.",
                tableName, partitionKey, rowKey);
            return null;
        }
        final TableEntity entity = TableEntityAccessHelper.createEntity(matchingEntity);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            EntityHelper.convertToSubclass(entity, TableEntity.class, logger));
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may
* be used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the retrieved
* {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPolicies -->
* <pre>
* TableAccessPolicies accessPolicies = tableClient.getAccessPolicies&
*
* accessPolicies.getIdentifiers&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPolicies -->
*
* @return The table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableAccessPolicies getAccessPolicies() {
    // No timeout and no caller-supplied context; unwraps the HTTP response to its value.
    return getAccessPoliciesWithResponse(null, null).getValue();
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may be
* used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the
* {@link Response HTTP response} and the retrieved {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableAccessPolicies> response = tableClient.getAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* + " IDs:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} containing the table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableAccessPolicies> getAccessPoliciesWithResponse(Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    Supplier<Response<TableAccessPolicies>> callable = () -> {
        ResponseBase<TablesGetAccessPolicyHeaders, List<SignedIdentifier>> response =
            tablesImplementation.getTables().getAccessPolicyWithResponse(tableName, null, null, contextValue);
        List<SignedIdentifier> identifiers = response.getValue();
        // A null payload maps to TableAccessPolicies wrapping null rather than an empty list.
        List<TableSignedIdentifier> converted = null;
        if (identifiers != null) {
            converted = identifiers.stream()
                .map(TableUtils::toTableSignedIdentifier)
                .collect(Collectors.toList());
        }
        return new SimpleResponse<>(response, new TableAccessPolicies(converted));
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPolicies
* <pre>
* List<TableSignedIdentifier> signedIdentifiers = new ArrayList<>&
*
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* tableClient.setAccessPolicies&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPolicies
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessPolicies(List<TableSignedIdentifier> tableSignedIdentifiers) {
    // Response is discarded; use setAccessPoliciesWithResponse to inspect the HTTP result.
    setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table. Prints out details of the
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
* <pre>
* List<TableSignedIdentifier> mySignedIdentifiers = new ArrayList<>&
*
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* Response<Void> response = tableClient.setAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessPoliciesWithResponse(List<TableSignedIdentifier> tableSignedIdentifiers,
    Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // A null identifier list is passed through as null (clears the stored policies).
    List<SignedIdentifier> signedIdentifiers = null;
    if (tableSignedIdentifiers != null) {
        signedIdentifiers = tableSignedIdentifiers.stream()
            .map(tableSignedIdentifier ->
                truncateAccessPolicyTimes(TableUtils.toSignedIdentifier(tableSignedIdentifier)))
            .collect(Collectors.toList());
    }
    List<SignedIdentifier> finalSignedIdentifiers = signedIdentifiers;
    Supplier<Response<Void>> callable = () -> {
        ResponseBase<TablesSetAccessPolicyHeaders, Void> response = tablesImplementation.getTables()
            .setAccessPolicyWithResponse(tableName, null, null,
                finalSignedIdentifiers, contextValue);
        return new SimpleResponse<>(response, response.getValue());
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}

/**
 * Truncates a {@link SignedIdentifier}'s access-policy start and expiry timestamps to whole seconds,
 * since the service does not accept sub-second precision. Mutates and returns the given identifier;
 * null identifiers and null access policies are passed through unchanged.
 */
private static SignedIdentifier truncateAccessPolicyTimes(SignedIdentifier signedIdentifier) {
    if (signedIdentifier == null || signedIdentifier.getAccessPolicy() == null) {
        return signedIdentifier;
    }
    if (signedIdentifier.getAccessPolicy().getStart() != null) {
        signedIdentifier.getAccessPolicy()
            .setStart(signedIdentifier.getAccessPolicy().getStart().truncatedTo(ChronoUnit.SECONDS));
    }
    if (signedIdentifier.getAccessPolicy().getExpiry() != null) {
        signedIdentifier.getAccessPolicy()
            .setExpiry(signedIdentifier.getAccessPolicy().getExpiry().truncatedTo(ChronoUnit.SECONDS));
    }
    return signedIdentifier;
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
 * {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of each {@link TableTransactionAction action}'s
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransaction
* <pre>
* List<TableTransactionAction> transactionActions = new ArrayList<>&
*
* String partitionKey = "markers";
* String firstEntityRowKey = "m001";
* String secondEntityRowKey = "m002";
*
* TableEntity firstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* firstEntityRowKey&
*
* TableEntity secondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* secondEntityRowKey&
*
* TableTransactionResult tableTransactionResult = tableClient.submitTransaction&
*
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransaction
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableAsyncClient.submitTransactionWithError
* <pre>
*
* tableAsyncClient.submitTransaction&
* .contextWrite&
* .doOnError&
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* transactionActions.remove&
* &
* &
* .subscribe&
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableAsyncClient.submitTransactionWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction actions} to perform on
* {@link TableEntity entities} in a table.
*
* @return A {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionResult action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionResult action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableTransactionResult submitTransaction(List<TableTransactionAction> transactionActions) {
    // Delegate to the response-returning overload with no timeout and no caller-supplied context,
    // then unwrap the transaction result for the caller.
    Response<TableTransactionResult> response = submitTransactionWithResponse(transactionActions, null, null);
    return response.getValue();
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
* {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of the {@link Response HTTP response} for the
* operation, as well as each {@link TableTransactionAction action}'s corresponding {@link Response HTTP
* response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponse
* <pre>
* List<TableTransactionAction> myTransactionActions = new ArrayList<>&
*
* String myPartitionKey = "markers";
* String myFirstEntityRowKey = "m001";
* String mySecondEntityRowKey = "m002";
*
* TableEntity myFirstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* myFirstEntityRowKey&
*
* TableEntity mySecondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* mySecondEntityRowKey&
*
* Response<TableTransactionResult> response = tableClient.submitTransactionWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " actions are:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponse
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
* <pre>
* try &
* Response<TableTransactionResult> transactionResultResponse =
* tableClient.submitTransactionWithResponse&
* new Context&
*
* System.out.printf&
* + " submitted actions are:", transactionResultResponse.getStatusCode&
*
* transactionResultResponse.getValue&
* .forEach&
* System.out.printf&
* &
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* myTransactionActions.remove&
* &
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction transaction actions} to perform on
* {@link TableEntity entities} in a table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} produced for the transaction itself. The response's value will contain
* a {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionAction action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionAction action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableTransactionResult> submitTransactionWithResponse(List<TableTransactionAction> transactionActions, Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // An empty transaction is rejected client-side before any request is built.
    if (transactionActions.isEmpty()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("A transaction must contain at least one operation."));
    }
    // Translate each public TableTransactionAction into its internal batch-action equivalent.
    final List<TransactionalBatchAction> operations = new ArrayList<>();
    for (TableTransactionAction transactionAction : transactionActions) {
        switch (transactionAction.getActionType()) {
            case CREATE:
                operations.add(new TransactionalBatchAction.CreateEntity(transactionAction.getEntity()));
                break;
            case UPSERT_MERGE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE));
                break;
            case UPSERT_REPLACE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE));
                break;
            case UPDATE_MERGE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE, transactionAction.getIfUnchanged()));
                break;
            case UPDATE_REPLACE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE, transactionAction.getIfUnchanged()));
                break;
            case DELETE:
                operations.add(
                    new TransactionalBatchAction.DeleteEntity(transactionAction.getEntity(),
                        transactionAction.getIfUnchanged()));
                break;
            default:
                // Unknown action types are silently skipped; the enum is expected to be exhaustive above.
                break;
        }
    }
    Supplier<Response<TableTransactionResult>> callable = () -> {
        // Each prepared sub-request is appended to the batch body as a change operation.
        BiConsumer<TransactionalBatchRequestBody, RequestActionPair> accumulator = (body, pair) ->
            body.addChangeOperation(new TransactionalBatchSubRequest(pair.getAction(), pair.getRequest()));
        // Combiner merges partial bodies; only exercised if the stream were parallel.
        BiConsumer<TransactionalBatchRequestBody, TransactionalBatchRequestBody> combiner = (body1, body2) ->
            body2.getContents().forEach(req -> body1.addChangeOperation((TransactionalBatchSubRequest) req));
        // Each action renders its own HTTP sub-request through the pipeline-less batch sub-client.
        TransactionalBatchRequestBody requestBody =
            operations.stream()
                .map(op -> new RequestActionPair(op.prepareRequest(transactionalBatchClient), op))
                .collect(TransactionalBatchRequestBody::new, accumulator, combiner);
        ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response =
            transactionalBatchImplementation
                .submitTransactionalBatchWithRestResponse(requestBody, null, contextValue);
        // parseResponse correlates sub-responses with sub-requests and throws if any action failed.
        Response<List<TableTransactionActionResponse>> parsedResponse = parseResponse(requestBody, response);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            new TableTransactionResult(transactionActions, parsedResponse.getValue()));
    };
    try {
        // When a timeout is set, run the call on the shared pool so it can be bounded; otherwise call inline.
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError((RuntimeException) TableUtils.interpretException(ex));
    }
}
/**
 * Immutable pairing of a prepared {@link HttpRequest} with the {@link TransactionalBatchAction} it was built
 * from, used while accumulating a transactional batch request body.
 */
private static class RequestActionPair {
    private final HttpRequest request;
    private final TransactionalBatchAction action;

    RequestActionPair(HttpRequest httpRequest, TransactionalBatchAction batchAction) {
        this.request = httpRequest;
        this.action = batchAction;
    }

    public TransactionalBatchAction getAction() {
        return this.action;
    }

    public HttpRequest getRequest() {
        return this.request;
    }
}
/**
 * Correlates the raw transactional batch response with the submitted sub-requests, attaching each
 * sub-request's HTTP request to its sub-response, and surfaces the first failed action (if any).
 *
 * @param requestBody The batch request body that was submitted, used to correlate sub-requests.
 * @param response The raw batch response returned by the service.
 *
 * @return A {@link Response} wrapping the list of per-action sub-responses.
 *
 * @throws RuntimeException Wrapping a {@link TableTransactionFailedException} when any action in the
 * transaction failed. The service rolls back the whole transaction in that case.
 */
private Response<List<TableTransactionActionResponse>> parseResponse(TransactionalBatchRequestBody requestBody,
    ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response) {
    TableServiceError error = null;
    String errorMessage = null;
    TransactionalBatchChangeSet changes = null;
    TransactionalBatchAction failedAction = null;
    Integer failedIndex = null;
    // Change operations are wrapped in a single change set at index 0 of the request contents.
    if (requestBody.getContents().get(0) instanceof TransactionalBatchChangeSet) {
        changes = (TransactionalBatchChangeSet) requestBody.getContents().get(0);
    }
    for (int i = 0; i < response.getValue().length; i++) {
        TableTransactionActionResponse subResponse = response.getValue()[i];
        // Attach the originating HTTP request to each sub-response for diagnostics.
        if (changes != null && changes.getContents().get(i) != null) {
            TableTransactionActionResponseAccessHelper.updateTableTransactionActionResponse(subResponse,
                changes.getContents().get(i).getHttpRequest());
        }
        // Only the first failing sub-response is inspected for error details.
        if (subResponse.getStatusCode() >= 400 && error == null && errorMessage == null) {
            if (subResponse.getValue() instanceof TableServiceError) {
                error = (TableServiceError) subResponse.getValue();
                if (changes != null && error.getOdataError() != null
                    && error.getOdataError().getMessage() != null
                    && error.getOdataError().getMessage().getValue() != null) {
                    // The error message conventionally starts with "<index>:"; parse it on a best-effort
                    // basis. Previously a missing ':' caused an uncaught StringIndexOutOfBoundsException
                    // and an out-of-range index an uncaught IndexOutOfBoundsException - both now guarded.
                    String message = error.getOdataError().getMessage().getValue();
                    int delimiterIndex = message.indexOf(':');
                    if (delimiterIndex > 0) {
                        try {
                            int parsedIndex = Integer.parseInt(message.substring(0, delimiterIndex));
                            if (parsedIndex >= 0 && parsedIndex < changes.getContents().size()) {
                                failedIndex = parsedIndex;
                                failedAction = changes.getContents().get(parsedIndex).getOperation();
                            }
                        } catch (NumberFormatException ignored) {
                            // Best effort only: the message prefix was not a numeric index.
                        }
                    }
                }
            } else if (subResponse.getValue() instanceof String) {
                errorMessage = "The service returned the following data for the failed operation: "
                    + subResponse.getValue();
            } else {
                errorMessage =
                    "The service returned the following status code for the failed operation: "
                        + subResponse.getStatusCode();
            }
        }
    }
    if (error != null || errorMessage != null) {
        String message = "An action within the operation failed, the transaction has been rolled back.";
        if (failedAction != null) {
            message += " The failed operation was: " + failedAction;
        } else if (errorMessage != null) {
            message += " " + errorMessage;
        }
        throw logger.logExceptionAsError(new RuntimeException(
            new TableTransactionFailedException(message, null, toTableServiceError(error), failedIndex)));
    } else {
        return new SimpleResponse<>(response, Arrays.asList(response.getValue()));
    }
}
} | class TableClient {
// Shared executor used to bound service calls with an optional client-side timeout;
// shut down via a JVM shutdown hook (see TableUtils.getThreadPoolWithShutdownHook).
private static final ExecutorService THREAD_POOL = TableUtils.getThreadPoolWithShutdownHook();
private final ClientLogger logger = new ClientLogger(TableClient.class);
// Name of the table this client targets.
private final String tableName;
// Generated implementation layer used for all non-batch service calls.
private final AzureTableImpl tablesImplementation;
// Implementation layer used to submit transactional batches; null for the internal batch sub-client.
private final TransactionalBatchImpl transactionalBatchImplementation;
// Account name parsed from the endpoint host (first dot-separated label).
private final String accountName;
// Full endpoint URL of this table.
private final String tableEndpoint;
// HTTP pipeline backing this client's service calls.
private final HttpPipeline pipeline;
// Pipeline-less sibling client used to prepare sub-requests for transactional batches;
// null when this instance is itself that sibling.
private final TableClient transactionalBatchClient;
/**
 * Creates a {@link TableClient} bound to a single table.
 *
 * @param tableName Name of the target table; must be non-null and non-empty.
 * @param pipeline {@link HttpPipeline} used for service calls.
 * @param serviceUrl Endpoint URL of the table service account.
 * @param serviceVersion Service REST API version to use.
 * @param tablesSerializer Serializer for regular table operations.
 * @param transactionalBatchSerializer Serializer for transactional batch payloads.
 */
TableClient(String tableName, HttpPipeline pipeline, String serviceUrl, TableServiceVersion serviceVersion,
    SerializerAdapter tablesSerializer, SerializerAdapter transactionalBatchSerializer) {
    try {
        if (tableName == null) {
            throw new NullPointerException(("'tableName' must not be null to create TableClient."));
        }
        if (tableName.isEmpty()) {
            throw new IllegalArgumentException("'tableName' must not be empty to create a TableClient.");
        }
        // Derive the account name from the endpoint host (first label) and the table endpoint from the path.
        final URI uri = URI.create(serviceUrl);
        this.accountName = uri.getHost().split("\\.", 2)[0];
        this.tableEndpoint = uri.resolve("/" + tableName).toString();
        logger.verbose("Table Service URI: {}", uri);
    } catch (NullPointerException | IllegalArgumentException ex) {
        // Log-and-rethrow so invalid construction arguments are recorded.
        throw logger.logExceptionAsError(ex);
    }
    this.tablesImplementation = new AzureTableImplBuilder()
        .url(serviceUrl)
        .serializerAdapter(tablesSerializer)
        .pipeline(pipeline)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.transactionalBatchImplementation =
        new TransactionalBatchImpl(tablesImplementation, transactionalBatchSerializer);
    this.tableName = tableName;
    this.pipeline = tablesImplementation.getHttpPipeline();
    // Sibling client with a no-op pipeline, used only to render transactional batch sub-requests.
    this.transactionalBatchClient = new TableClient(this, serviceVersion, tablesSerializer);
}
/**
 * Creates the internal, pipeline-less sub-client used to prepare transactional batch sub-requests.
 * This variant deliberately leaves the batch-related fields null: it must never submit batches itself.
 *
 * @param client The parent {@link TableClient} to copy endpoint/account/table settings from.
 * @param serviceVersion Service REST API version to use.
 * @param tablesSerializer Serializer for regular table operations.
 */
TableClient(TableClient client, ServiceVersion serviceVersion, SerializerAdapter tablesSerializer) {
    this.accountName = client.getAccountName();
    this.tableEndpoint = client.getTableEndpoint();
    // A "null" pipeline that renders requests without sending them over the wire.
    this.pipeline = BuilderHelper.buildNullClientPipeline();
    this.tablesImplementation = new AzureTableImplBuilder()
        .url(client.getTablesImplementation().getUrl())
        .serializerAdapter(tablesSerializer)
        .pipeline(this.pipeline)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.tableName = client.getTableName();
    this.transactionalBatchImplementation = null;
    this.transactionalBatchClient = null;
}
/**
 * Returns the name of the table this client operates on.
 *
 * @return The table's name.
 */
public String getTableName() {
    return this.tableName;
}
/**
 * Returns the name of the account that contains this table.
 *
 * @return The containing account's name.
 */
public String getAccountName() {
    return this.accountName;
}
/**
 * Returns the full endpoint URL for this table.
 *
 * @return This table's endpoint.
 */
public String getTableEndpoint() {
    return this.tableEndpoint;
}
/**
 * Returns the {@link HttpPipeline} backing this client's service calls.
 *
 * @return This client's {@link HttpPipeline}.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
 * Returns the generated {@link AzureTableImpl} this client delegates service calls to.
 *
 * @return The underlying {@link AzureTableImpl}.
 */
AzureTableImpl getTablesImplementation() {
    return this.tablesImplementation;
}
/**
 * Returns the REST API version this client sends to the service.
 *
 * @return The {@link TableServiceVersion} in use.
 */
public TableServiceVersion getServiceVersion() {
    String version = tablesImplementation.getVersion();
    return TableServiceVersion.fromString(version);
}
/**
 * Generates a service SAS for the table using the specified {@link TableSasSignatureValues}.
 *
 * <p><strong>Note:</strong> The client must be authenticated via {@link AzureNamedKeyCredential}.</p>
 * <p>See {@link TableSasSignatureValues} for details on constructing a service SAS.</p>
 *
 * @param tableSasSignatureValues {@link TableSasSignatureValues} describing the SAS to generate.
 *
 * @return A {@code String} containing the SAS query parameters.
 *
 * @throws IllegalStateException If this {@link TableClient} is not authenticated with an
 * {@link AzureNamedKeyCredential}.
 */
public String generateSas(TableSasSignatureValues tableSasSignatureValues) {
    // SAS generation requires the shared-key credential that was installed in the pipeline, if any.
    AzureNamedKeyCredential namedKeyCredential = TableSasUtils.extractNamedKeyCredential(getHttpPipeline());

    if (namedKeyCredential == null) {
        throw logger.logExceptionAsError(new IllegalStateException("Cannot generate a SAS token with a client that"
            + " is not authenticated with an AzureNamedKeyCredential."));
    }

    TableSasGenerator sasGenerator =
        new TableSasGenerator(tableSasSignatureValues, getTableName(), namedKeyCredential);

    return sasGenerator.getSas();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTable -->
* <pre>
* TableItem tableItem = tableClient.createTable&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTable -->
*
* @return A {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableItem createTable() {
    // Delegate to the response-returning overload with no timeout or context and unwrap the item.
    Response<TableItem> response = createTableWithResponse(null, null);
    return response.getValue();
}
/**
* Creates the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Creates a table. Prints out the details of the {@link Response HTTP response} and the created table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createTableWithResponse
* <pre>
* Response<TableItem> response = tableClient.createTableWithResponse&
* new Context&
*
* System.out.printf&
* response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing a {@link TableItem} that represents the table.
*
* @throws TableServiceException If a table with the same name already exists within the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableItem> createTableWithResponse(Duration timeout, Context context) {
    final Context finalContext = TableUtils.setContext(context, true);
    final TableProperties tableProperties = new TableProperties().setTableName(tableName);

    Supplier<Response<TableItem>> operation = () -> {
        // The service returns no content; synthesize the TableItem from the known table name.
        TableItem createdItem =
            TableItemAccessHelper.createItem(new TableResponseProperties().setTableName(tableName));

        return new SimpleResponse<>(
            tablesImplementation.getTables().createWithResponse(tableProperties, null,
                ResponseFormat.RETURN_NO_CONTENT, null, finalContext),
            createdItem);
    };

    return callWithOptionalTimeout(operation, THREAD_POOL, timeout, logger);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTable -->
* <pre>
* tableClient.deleteTable&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTable -->
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteTable() {
    // Delegate to the response-returning overload; the response itself carries no useful value here.
    deleteTableWithResponse(null, null);
}
/**
* Deletes the table within the Tables service.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a table. Prints out the details of the {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteTableWithResponse
* <pre>
* Response<Void> response = tableClient.deleteTableWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteTableWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteTableWithResponse(Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    Supplier<Response<Void>> callable = () -> new SimpleResponse<>(tablesImplementation.getTables()
        .deleteWithResponse(tableName, null, contextValue), null);
    try {
        // When a timeout is set, run the call on the shared pool so it can be bounded; otherwise call inline.
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        // Map the failure to a TableServiceException; a 404 ("table not found") is swallowed so that
        // deleting a nonexistent table succeeds quietly, other errors are rethrown.
        // NOTE(review): called unqualified, unlike TableUtils.mapThrowableToTableServiceException elsewhere
        // in this file - presumably a static import; confirm against the file header.
        Throwable except = mapThrowableToTableServiceException(ex);
        return swallow404Exception(except);
    }
}
/**
 * Converts a 404 {@link TableServiceException} into a successful empty {@link Response}, so that deleting
 * a resource that does not exist is treated as a no-op. Any other throwable is mapped to a
 * {@link TableServiceException} (where applicable), logged, and rethrown.
 *
 * @param ex The throwable produced by a delete call.
 *
 * @return An empty {@link Response} carrying the 404 response's request, status code, and headers.
 *
 * @throws RuntimeException When {@code ex} is not a 404 {@link TableServiceException}.
 */
private Response<Void> swallow404Exception(Throwable ex) {
    // Cast once instead of repeating ((TableServiceException) ex) for every accessor.
    if (ex instanceof TableServiceException) {
        TableServiceException serviceException = (TableServiceException) ex;

        if (serviceException.getResponse().getStatusCode() == 404) {
            return new SimpleResponse<>(
                serviceException.getResponse().getRequest(),
                serviceException.getResponse().getStatusCode(),
                serviceException.getResponse().getHeaders(),
                null);
        }
    }

    throw logger.logExceptionAsError((RuntimeException) (TableUtils.mapThrowableToTableServiceException(ex)));
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the created
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.createEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntity
*
* @param entity The {@link TableEntity entity} to insert.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void createEntity(TableEntity entity) {
    // Delegate to the response-returning overload with no timeout and no caller-supplied context.
    createEntityWithResponse(entity, null, null);
}
/**
* Inserts an {@link TableEntity entity} into the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Inserts an {@link TableEntity entity} into the table. Prints out the details of the
* {@link Response HTTP response} and the created {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.createEntityWithResponse
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.createEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was created.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.createEntityWithResponse
*
* @param entity The {@link TableEntity entity} to insert.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If an {@link TableEntity entity} with the same partition key and row key already
* exists within the table.
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> createEntityWithResponse(TableEntity entity, Duration timeout, Context context) {
    final Context finalContext = TableUtils.setContext(context, true);

    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }

    // Copy values exposed via getters into the entity's property map before serialization.
    EntityHelper.setPropertiesFromGetters(entity, logger);

    Supplier<Response<Void>> insertOperation = () -> {
        Response<Map<String, Object>> rawResponse = tablesImplementation.getTables().insertEntityWithResponse(
            tableName, null, null, ResponseFormat.RETURN_NO_CONTENT,
            entity.getProperties(), null, finalContext);

        // Strip the body: callers of this overload only need the status line and headers.
        return new SimpleResponse<>(rawResponse.getRequest(), rawResponse.getStatusCode(),
            rawResponse.getHeaders(), null);
    };

    return callWithOptionalTimeout(insertOperation, THREAD_POOL, timeout, logger);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or merges the
* {@link TableEntity entity} with the existing {@link TableEntity entity} otherwise.
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table. Prints out the details of the upserted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.upsertEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntity
*
* @param entity The {@link TableEntity entity} to upsert.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void upsertEntity(TableEntity entity) {
    // Delegate to the response-returning overload; a null update mode falls back to the default (merge).
    upsertEntityWithResponse(entity, null, null, null);
}
/**
* Inserts an {@link TableEntity entity} into the table if it does not exist, or updates the existing
* {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode} otherwise. The default
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity
* entity}.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Upserts an {@link TableEntity entity} into the table with the specified
* {@link TableEntityUpdateMode update mode} if said {@link TableEntity entity} already exists. Prints out the
* details of the {@link Response HTTP response} and the upserted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.upsertEntityWithResponse
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.upsertEntityWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated&
* </pre>
* <!-- end com.azure.data.tables.tableClient.upsertEntityWithResponse
*
* @param entity The {@link TableEntity entity} to upsert.
* @param updateMode The type of update to perform if the {@link TableEntity entity} already exits.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> upsertEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
    Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    // Keys are escaped so single quotes survive being embedded in the request URL.
    String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
    String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
    // Copy values exposed via getters into the entity's property map before serialization.
    EntityHelper.setPropertiesFromGetters(entity, logger);
    Supplier<Response<Void>> callable = () -> {
        // REPLACE maps to the update operation; any other mode (including null) maps to merge.
        // No ETag is sent, so the call inserts when absent and updates when present (upsert).
        if (updateMode == TableEntityUpdateMode.REPLACE) {
            return tablesImplementation.getTables().updateEntityWithResponse(
                tableName, partitionKey, rowKey, null, null, null,
                entity.getProperties(), null, contextValue);
        } else {
            return tablesImplementation.getTables().mergeEntityWithResponse(
                tableName, partitionKey, rowKey, null, null, null,
                entity.getProperties(), null, contextValue);
        }
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Updates an existing {@link TableEntity entity} by merging the provided {@link TableEntity entity} with the
* existing {@link TableEntity entity}.
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table. Prints out the details of the updated
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
* TableEntity tableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void updateEntity(TableEntity entity) {
    // Delegate with a null update mode, which resolves to the default (merge) downstream.
    updateEntity(entity, null);
}
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified
* {@link TableEntityUpdateMode update mode}. Prints out the details of the updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntity
* <pre>
*
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.updateEntity&
*
* System.out.printf&
* "rowKey"&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntity
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void updateEntity(TableEntity entity, TableEntityUpdateMode updateMode) {
    // Unconditional update: ifUnchanged=false means no ETag match is required.
    updateEntityWithResponse(entity, updateMode, false, null, null);
}
/**
* Updates an existing {@link TableEntity entity} using the specified {@link TableEntityUpdateMode update mode}.
* The default {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
*
* <p>When the {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will be merged into the existing {@link TableEntity entity}. When the
* {@link TableEntityUpdateMode update mode} is {@link TableEntityUpdateMode
* {@link TableEntity entity}'s properties will completely replace those in the existing {@link TableEntity entity}.
* </p>
*
* <p><strong>Code Samples</strong></p>
* <p>Updates a {@link TableEntity entity} on the table with the specified {@link TableEntityUpdateMode update
* mode}
* if the {@code ETags} on both {@link TableEntity entities} match. Prints out the details of the
* {@link Response HTTP response} updated {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.updateEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.updateEntityWithResponse&
* true, Duration.ofSeconds&
*
* System.out.printf&
* + " '%s' was updated.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.updateEntityWithResponse
*
* @param entity The {@link TableEntity entity} to update.
* @param updateMode The type of update to perform.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws IllegalArgumentException If the provided {@link TableEntity entity} is {@code null}.
* @throws TableServiceException If no {@link TableEntity entity} with the same partition key and row key exists
* within the table, or if {@code ifUnchanged} is {@code true} and the existing {@link TableEntity entity}'s ETag
* does not match that of the provided {@link TableEntity entity}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> updateEntityWithResponse(TableEntity entity, TableEntityUpdateMode updateMode,
    boolean ifUnchanged, Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    // Keys are escaped so single quotes survive being embedded in the request URL.
    String partitionKey = TableUtils.escapeSingleQuotes(entity.getPartitionKey());
    String rowKey = TableUtils.escapeSingleQuotes(entity.getRowKey());
    // "*" matches any ETag (unconditional update); otherwise the entity's own ETag must match.
    String eTag = ifUnchanged ? entity.getETag() : "*";
    // Copy values exposed via getters into the entity's property map before serialization.
    EntityHelper.setPropertiesFromGetters(entity, logger);
    Supplier<Response<Void>> callable = () -> {
        // REPLACE maps to the update operation; any other mode (including null) maps to merge.
        if (updateMode == TableEntityUpdateMode.REPLACE) {
            return tablesImplementation.getTables()
                .updateEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
                    entity.getProperties(), null, contextValue);
        } else {
            return tablesImplementation.getTables()
                .mergeEntityWithResponse(tableName, partitionKey, rowKey, null, null, eTag,
                    entity.getProperties(), null, contextValue);
        }
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes an {@link TableEntity entity} on the table. Prints out the entity's {@code partitionKey} and
* {@code rowKey}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteEntity(String partitionKey, String rowKey) {
    // Unconditional delete: no ETag check (ifUnchanged = false), no timeout, no caller context.
    deleteEntityWithResponse(partitionKey, rowKey, null, false, null, null);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the deleted
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntity
* <pre>
* TableEntity myTableEntity = new TableEntity&
* .addProperty&
*
* tableClient.deleteEntity&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntity
*
* @param entity The {@link TableEntity entity} to delete.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteEntity(TableEntity entity) {
    // Unconditional delete of the given entity (ifUnchanged = false), no timeout, no caller context.
    deleteEntityWithResponse(entity, false, null, null);
}
/**
* Deletes an {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Deletes a {@link TableEntity entity} on the table. Prints out the details of the
* {@link Response HTTP response} and the deleted {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.deleteEntityWithResponse
* <pre>
* TableEntity someTableEntity = new TableEntity&
* .addProperty&
*
* Response<Void> response = tableClient.deleteEntityWithResponse&
* new Context&
*
* System.out.printf&
* + " '%s' was deleted.", response.getStatusCode&
* </pre>
* <!-- end com.azure.data.tables.tableClient.deleteEntityWithResponse
*
* @param entity The table {@link TableEntity entity} to delete.
* @param ifUnchanged When true, the ETag of the provided {@link TableEntity entity} must match the ETag of the
* {@link TableEntity entity} in the Table service. If the values do not match, the update will not occur and an
* exception will be thrown.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
* @throws IllegalArgumentException If the entity has null 'partitionKey' or 'rowKey'.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteEntityWithResponse(TableEntity entity, boolean ifUnchanged, Duration timeout,
    Context context) {
    // Guard against a null entity so callers get the same logged IllegalArgumentException thrown by
    // updateEntityWithResponse instead of an unhelpful NullPointerException from entity.getPartitionKey().
    if (entity == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'entity' cannot be null."));
    }
    // Delegate to the key-based overload, forwarding the entity's ETag for conditional deletes.
    return deleteEntityWithResponse(
        entity.getPartitionKey(), entity.getRowKey(), entity.getETag(), ifUnchanged, timeout, context);
}
/**
 * Shared implementation for the delete-entity overloads.
 *
 * @param partitionKey The partition key of the entity to delete; must not be {@code null}.
 * @param rowKey The row key of the entity to delete; must not be {@code null}.
 * @param eTag The ETag to match, used only when {@code ifUnchanged} is {@code true}.
 * @param ifUnchanged Whether the delete is conditional on the stored entity's ETag.
 * @param timeout Optional bound on the call; exceeded timeouts surface as {@link RuntimeException}.
 * @param context Additional context passed through the HTTP pipeline.
 *
 * @return The HTTP response; 404 responses are swallowed and returned as success.
 */
private Response<Void> deleteEntityWithResponse(String partitionKey, String rowKey, String eTag, boolean ifUnchanged,
    Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // "*" asks the service for an unconditional delete when no ETag match is required.
    String finalETag = ifUnchanged ? eTag : "*";
    if (partitionKey == null || rowKey == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null"));
    }
    Supplier<Response<Void>> callable = () -> tablesImplementation.getTables().deleteEntityWithResponse(
        tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey), finalETag,
        null, null, null, contextValue);
    try {
        // Only dispatch to the shared pool when a timeout must be enforced; otherwise run inline.
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        // NOTE(review): the interrupt status is not restored on InterruptedException — confirm intended.
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        // Deleting an entity that no longer exists is treated as success: 404s are swallowed.
        return swallow404Exception(mapThrowableToTableServiceException(ex));
    }
}
/**
* Lists all {@link TableEntity entities} within the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities -->
* <pre>
* PagedIterable<TableEntity> tableEntities = tableClient.listEntities&
*
* tableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities -->
*
* @return A {@link PagedIterable} containing all {@link TableEntity entities} within the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<TableEntity> listEntities() {
    // List with default query options (no filter, select or top), no timeout and no caller context.
    final ListEntitiesOptions defaultOptions = new ListEntitiesOptions();
    return listEntities(defaultOptions, null, null);
}
/**
* Lists {@link TableEntity entities} using the parameters in the provided options.
*
* <p>If the {@code filter} parameter in the options is set, only {@link TableEntity entities} matching the filter
* will be returned. If the {@code select} parameter is set, only the properties included in the select parameter
* will be returned for each {@link TableEntity entity}. If the {@code top} parameter is set, the maximum number of
* returned {@link TableEntity entities} per page will be limited to that value.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Lists all {@link TableEntity entities} on the table. Prints out the details of the
* {@link Response HTTP response} and all the retrieved {@link TableEntity entities}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.listEntities
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* ListEntitiesOptions listEntitiesOptions = new ListEntitiesOptions&
* .setTop&
* .setFilter&
* .setSelect&
*
* PagedIterable<TableEntity> myTableEntities = tableClient.listEntities&
* Duration.ofSeconds&
*
* myTableEntities.forEach&
* System.out.printf&
* tableEntity.getPartitionKey&
*
* tableEntity.getProperties&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.listEntities
*
* @param options The {@code filter}, {@code select}, and {@code top} OData query options to apply to this
* operation.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return A {@link PagedIterable} containing matching {@link TableEntity entities} within the table.
*
* @throws IllegalArgumentException If one or more of the OData query options in {@code options} is malformed.
* @throws TableServiceException If the request is rejected by the service.
*/
// Fetches the first page of a listEntities call.
// NOTE(review): @ServiceMethod sits on a private helper rather than a public API — confirm intentional.
@ServiceMethod(returns = ReturnType.COLLECTION)
private <T extends TableEntity> PagedResponse<T> listEntitiesFirstPage(Context context,
    ListEntitiesOptions options,
    Class<T> resultType) {
    // The first page never has a continuation token, so both continuation keys are null.
    return listEntities(null, null, context, options, resultType);
}
/**
 * Fetches a continuation page of a listEntities call, or returns {@code null} when paging is done.
 */
private <T extends TableEntity> PagedResponse<T> listEntitiesNextPage(String token, Context context,
    ListEntitiesOptions options,
    Class<T> resultType) {
    // A null continuation token means the previous page was the last one.
    if (token == null) {
        return null;
    }
    try {
        // The token encodes the next partition key and next row key, in that order.
        String[] continuationKeys = TableUtils.getKeysFromToken(token);
        String nextPartitionKey = continuationKeys[0];
        String nextRowKey = continuationKeys[1];
        return listEntities(nextPartitionKey, nextRowKey, context, options, resultType);
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError(ex);
    }
}
/**
 * Issues one Query Entities request and converts its payload into a page of the requested entity type.
 * Returns {@code null} when the service response carries no entity payload.
 */
private <T extends TableEntity> PagedResponse<T> listEntities(String nextPartitionKey, String nextRowKey,
    Context context, ListEntitiesOptions options,
    Class<T> resultType) {
    Context contextValue = TableUtils.setContext(context, true);
    // Collapse the requested property names into the comma-separated $select form.
    String select = options.getSelect() == null ? null : String.join(",", options.getSelect());
    QueryOptions queryOptions = new QueryOptions()
        .setFilter(options.getFilter())
        .setTop(options.getTop())
        .setSelect(select)
        .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);
    final ResponseBase<TablesQueryEntitiesHeaders, TableEntityQueryResponse> response =
        tablesImplementation.getTables().queryEntitiesWithResponse(tableName, null, null,
            nextPartitionKey, nextRowKey, queryOptions, contextValue);
    final TableEntityQueryResponse queryResponse = response.getValue();
    if (queryResponse == null || queryResponse.getValue() == null) {
        return null;
    }
    // Deserialize each raw property map into the requested TableEntity subtype.
    final List<T> entities = new ArrayList<>(queryResponse.getValue().size());
    for (Map<String, Object> properties : queryResponse.getValue()) {
        entities.add(EntityHelper.convertToSubclass(
            TableEntityAccessHelper.createEntity(properties), resultType, logger));
    }
    // Continuation keys from the response headers drive the next page fetch.
    return new EntityPaged<>(response, entities,
        response.getDeserializedHeaders().getXMsContinuationNextPartitionKey(),
        response.getDeserializedHeaders().getXMsContinuationNextRowKey());
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the retrieved
* {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntity
* <pre>
* TableEntity tableEntity = tableClient.getEntity&
*
* System.out.printf&
* tableEntity.getRowKey&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntity
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
*
* @return The {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null} or
* empty.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableEntity getEntity(String partitionKey, String rowKey) {
    // No property projection, no timeout and no caller-supplied context.
    Response<TableEntity> response = getEntityWithResponse(partitionKey, rowKey, null, null, null);
    return response.getValue();
}
/**
* Gets a single {@link TableEntity entity} from the table.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets an {@link TableEntity entity} on the table. Prints out the details of the {@link Response HTTP response}
* retrieved {@link TableEntity entity}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getEntityWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableEntity> response = tableClient.getEntityWithResponse&
* Duration.ofSeconds&
*
* TableEntity myTableEntity = response.getValue&
*
* System.out.printf&
* + " '%s' and properties:", response.getStatusCode&
* myTableEntity.getRowKey&
*
* myTableEntity.getProperties&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getEntityWithResponse
*
* @param partitionKey The partition key of the {@link TableEntity entity}.
* @param rowKey The row key of the {@link TableEntity entity}.
* @param select A list of properties to select on the {@link TableEntity entity}.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response} containing the {@link TableEntity entity}.
*
* @throws IllegalArgumentException If the provided {@code partitionKey} or {@code rowKey} are {@code null}
* or if the {@code select} OData query option is malformed.
* @throws TableServiceException If no {@link TableEntity entity} with the provided {@code partitionKey} and
* {@code rowKey} exists within the table.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableEntity> getEntityWithResponse(String partitionKey, String rowKey, List<String> select,
    Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // Always request full OData metadata so property types round-trip correctly.
    QueryOptions queryOptions =
        new QueryOptions().setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);
    if (select != null) {
        queryOptions.setSelect(String.join(",", select));
    }
    if (partitionKey == null || rowKey == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'partitionKey' and 'rowKey' cannot be null."));
    }
    Supplier<Response<TableEntity>> callable = () -> {
        ResponseBase<TablesQueryEntityWithPartitionAndRowKeyHeaders, Map<String, Object>> response =
            tablesImplementation.getTables().queryEntityWithPartitionAndRowKeyWithResponse(
                tableName, TableUtils.escapeSingleQuotes(partitionKey), TableUtils.escapeSingleQuotes(rowKey),
                null, null, queryOptions, contextValue);
        Map<String, Object> properties = response.getValue();
        if (properties == null || properties.isEmpty()) {
            // The service answered but nothing matched; "not found" is surfaced as a null response.
            logger.info("There was no matching entity. Table {}, partition key: {}, row key: {}.",
                tableName, partitionKey, rowKey);
            return null;
        }
        TableEntity matched = TableEntityAccessHelper.createEntity(properties);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            EntityHelper.convertToSubclass(matched, TableEntity.class, logger));
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may
* be used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the retrieved
* {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPolicies -->
* <pre>
* TableAccessPolicies accessPolicies = tableClient.getAccessPolicies&
*
* accessPolicies.getIdentifiers&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPolicies -->
*
* @return The table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableAccessPolicies getAccessPolicies() {
    // No timeout and no caller-supplied context.
    Response<TableAccessPolicies> response = getAccessPoliciesWithResponse(null, null);
    return response.getValue();
}
/**
* Retrieves details about any stored {@link TableAccessPolicies access policies} specified on the table that may be
* used with Shared Access Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Gets a table's {@link TableAccessPolicies access policies}. Prints out the details of the
* {@link Response HTTP response} and the retrieved {@link TableAccessPolicies access policies}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
* <pre>
* List<String> propertiesToSelect = new ArrayList<>&
* propertiesToSelect.add&
* propertiesToSelect.add&
* propertiesToSelect.add&
*
* Response<TableAccessPolicies> response = tableClient.getAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* + " IDs:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.getAccessPoliciesWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} containing the table's {@link TableAccessPolicies access policies}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableAccessPolicies> getAccessPoliciesWithResponse(Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    Supplier<Response<TableAccessPolicies>> callable = () -> {
        ResponseBase<TablesGetAccessPolicyHeaders, List<SignedIdentifier>> response =
            tablesImplementation.getTables().getAccessPolicyWithResponse(tableName, null, null, contextValue);
        // Convert each service-model identifier into its public-facing equivalent;
        // a null service payload stays null.
        List<TableSignedIdentifier> identifiers = null;
        if (response.getValue() != null) {
            identifiers = new ArrayList<>(response.getValue().size());
            for (SignedIdentifier signedIdentifier : response.getValue()) {
                identifiers.add(TableUtils.toTableSignedIdentifier(signedIdentifier));
            }
        }
        return new SimpleResponse<>(response, new TableAccessPolicies(identifiers));
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPolicies
* <pre>
* List<TableSignedIdentifier> signedIdentifiers = new ArrayList<>&
*
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* signedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* tableClient.setAccessPolicies&
*
* System.out.print&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPolicies
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setAccessPolicies(List<TableSignedIdentifier> tableSignedIdentifiers) {
    // No timeout and no caller-supplied context.
    setAccessPoliciesWithResponse(tableSignedIdentifiers, null, null);
}
/**
* Sets stored {@link TableAccessPolicies access policies} for the table that may be used with Shared Access
* Signatures.
*
* <p>This operation is only supported on Azure Storage endpoints.</p>
*
* <p><strong>Code Samples</strong></p>
* <p>Sets stored {@link TableAccessPolicies access policies} on a table. Prints out details of the
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
* <pre>
* List<TableSignedIdentifier> mySignedIdentifiers = new ArrayList<>&
*
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
* mySignedIdentifiers.add&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* Response<Void> response = tableClient.setAccessPoliciesWithResponse&
* new Context&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.setAccessPoliciesWithResponse
*
* @param tableSignedIdentifiers The {@link TableSignedIdentifier access policies} for the table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return The {@link Response HTTP response}.
*
* @throws TableServiceException If the request is rejected by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setAccessPoliciesWithResponse(List<TableSignedIdentifier> tableSignedIdentifiers,
    Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // Convert the public-facing identifiers into service models; a null input list stays null.
    List<SignedIdentifier> signedIdentifiers = null;
    if (tableSignedIdentifiers != null) {
        signedIdentifiers = new ArrayList<>(tableSignedIdentifiers.size());
        for (TableSignedIdentifier tableSignedIdentifier : tableSignedIdentifiers) {
            signedIdentifiers.add(
                truncateAccessPolicyToSeconds(TableUtils.toSignedIdentifier(tableSignedIdentifier)));
        }
    }
    List<SignedIdentifier> finalSignedIdentifiers = signedIdentifiers;
    Supplier<Response<Void>> callable = () -> {
        ResponseBase<TablesSetAccessPolicyHeaders, Void> response = tablesImplementation.getTables()
            .setAccessPolicyWithResponse(tableName, null, null,
                finalSignedIdentifiers, contextValue);
        return new SimpleResponse<>(response, response.getValue());
    };
    return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger);
}

/**
 * Truncates an access policy's start and expiry times to whole seconds in place, since the service
 * does not accept sub-second precision. Extracted to remove the duplicated null-check boilerplate
 * that previously lived inline in the conversion lambda.
 *
 * @param signedIdentifier The identifier to normalize; may be {@code null}.
 *
 * @return The same identifier instance, for call chaining.
 */
private static SignedIdentifier truncateAccessPolicyToSeconds(SignedIdentifier signedIdentifier) {
    if (signedIdentifier == null || signedIdentifier.getAccessPolicy() == null) {
        return signedIdentifier;
    }
    if (signedIdentifier.getAccessPolicy().getStart() != null) {
        signedIdentifier.getAccessPolicy()
            .setStart(signedIdentifier.getAccessPolicy().getStart().truncatedTo(ChronoUnit.SECONDS));
    }
    if (signedIdentifier.getAccessPolicy().getExpiry() != null) {
        signedIdentifier.getAccessPolicy()
            .setExpiry(signedIdentifier.getAccessPolicy().getExpiry().truncatedTo(ChronoUnit.SECONDS));
    }
    return signedIdentifier;
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
* {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of each {@link TableTransactionAction action}'s
* {@link Response HTTP response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransaction
* <pre>
* List<TableTransactionAction> transactionActions = new ArrayList<>&
*
* String partitionKey = "markers";
* String firstEntityRowKey = "m001";
* String secondEntityRowKey = "m002";
*
* TableEntity firstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* firstEntityRowKey&
*
* TableEntity secondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* transactionActions.add&
*
* System.out.printf&
* secondEntityRowKey&
*
* TableTransactionResult tableTransactionResult = tableClient.submitTransaction&
*
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransaction
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableAsyncClient.submitTransactionWithError
* <pre>
*
* tableAsyncClient.submitTransaction&
* .contextWrite&
* .doOnError&
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* transactionActions.remove&
* &
* &
* .subscribe&
* System.out.print&
*
* tableTransactionResult.getTransactionActionResponses&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.data.tables.tableAsyncClient.submitTransactionWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction actions} to perform on
* {@link TableEntity entities} in a table.
*
* @return A {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionResult action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionResult action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public TableTransactionResult submitTransaction(List<TableTransactionAction> transactionActions) {
    // No timeout and no caller-supplied context.
    Response<TableTransactionResult> response = submitTransactionWithResponse(transactionActions, null, null);
    return response.getValue();
}
/**
* Executes all {@link TableTransactionAction actions} within the list inside a transaction. When the call
* completes, either all {@link TableTransactionAction actions} in the transaction will succeed, or if a failure
* occurs, all {@link TableTransactionAction actions} in the transaction will be rolled back.
* {@link TableTransactionAction Actions} are executed sequentially. Each {@link TableTransactionAction action}
* must operate on a distinct row key. Attempting to pass multiple {@link TableTransactionAction actions} that
* share the same row key will cause an error.
*
* <p><strong>Code Samples</strong></p>
* <p>Submits a transaction that contains multiple {@link TableTransactionAction actions} to be applied to
* {@link TableEntity entities} on a table. Prints out details of the {@link Response HTTP response} for the
* operation, as well as each {@link TableTransactionAction action}'s corresponding {@link Response HTTP
* response}.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponse
* <pre>
* List<TableTransactionAction> myTransactionActions = new ArrayList<>&
*
* String myPartitionKey = "markers";
* String myFirstEntityRowKey = "m001";
* String mySecondEntityRowKey = "m002";
*
* TableEntity myFirstEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* myFirstEntityRowKey&
*
* TableEntity mySecondEntity = new TableEntity&
* .addProperty&
* .addProperty&
*
* myTransactionActions.add&
*
* System.out.printf&
* mySecondEntityRowKey&
*
* Response<TableTransactionResult> response = tableClient.submitTransactionWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* + " actions are:", response.getStatusCode&
*
* response.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponse
* <p>Shows how to handle a transaction with a failing {@link TableTransactionAction action} via the provided
* {@link TableTransactionFailedException exception}, which contains the index of the first failing action in the
* transaction.</p>
* <!-- src_embed com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
* <pre>
* try &
* Response<TableTransactionResult> transactionResultResponse =
* tableClient.submitTransactionWithResponse&
* new Context&
*
* System.out.printf&
* + " submitted actions are:", transactionResultResponse.getStatusCode&
*
* transactionResultResponse.getValue&
* .forEach&
* System.out.printf&
* &
* &
* int failedActionIndex = e.getFailedTransactionActionIndex&
* &
* &
* myTransactionActions.remove&
* &
* &
* </pre>
* <!-- end com.azure.data.tables.tableClient.submitTransactionWithResponseWithError
*
* @param transactionActions A {@link List} of {@link TableTransactionAction transaction actions} to perform on
* {@link TableEntity entities} in a table.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
* the service call.
*
* @return An {@link Response HTTP response} produced for the transaction itself. The response's value will contain
* a {@link List} of {@link TableTransactionActionResponse sub-responses} that correspond to each
* {@link TableTransactionAction action} in the transaction.
*
* @throws IllegalArgumentException If no {@link TableTransactionAction actions} have been added to the list.
* @throws TableServiceException If the request is rejected by the service.
* @throws TableTransactionFailedException If any {@link TableTransactionAction action} within the transaction
* fails. See the documentation for the client methods in {@link TableClient} to understand the conditions that
* may cause a given {@link TableTransactionAction action} to fail.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<TableTransactionResult> submitTransactionWithResponse(List<TableTransactionAction> transactionActions, Duration timeout, Context context) {
    Context contextValue = TableUtils.setContext(context, true);
    // An empty transaction is a caller error; reject it before building any requests.
    if (transactionActions.isEmpty()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("A transaction must contain at least one operation."));
    }
    // Translate each public action into its internal transactional-batch equivalent, preserving order.
    final List<TransactionalBatchAction> operations = new ArrayList<>();
    for (TableTransactionAction transactionAction : transactionActions) {
        switch (transactionAction.getActionType()) {
            case CREATE:
                operations.add(new TransactionalBatchAction.CreateEntity(transactionAction.getEntity()));
                break;
            case UPSERT_MERGE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE));
                break;
            case UPSERT_REPLACE:
                operations.add(new TransactionalBatchAction.UpsertEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE));
                break;
            case UPDATE_MERGE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.MERGE, transactionAction.getIfUnchanged()));
                break;
            case UPDATE_REPLACE:
                operations.add(new TransactionalBatchAction.UpdateEntity(transactionAction.getEntity(),
                    TableEntityUpdateMode.REPLACE, transactionAction.getIfUnchanged()));
                break;
            case DELETE:
                operations.add(
                    new TransactionalBatchAction.DeleteEntity(transactionAction.getEntity(),
                        transactionAction.getIfUnchanged()));
                break;
            default:
                // Unknown action types are silently skipped.
                break;
        }
    }
    Supplier<Response<TableTransactionResult>> callable = () -> {
        // Fold every prepared sub-request into a single multipart transactional batch body.
        BiConsumer<TransactionalBatchRequestBody, RequestActionPair> accumulator = (body, pair) ->
            body.addChangeOperation(new TransactionalBatchSubRequest(pair.getAction(), pair.getRequest()));
        BiConsumer<TransactionalBatchRequestBody, TransactionalBatchRequestBody> combiner = (body1, body2) ->
            body2.getContents().forEach(req -> body1.addChangeOperation((TransactionalBatchSubRequest) req));
        TransactionalBatchRequestBody requestBody =
            operations.stream()
                .map(op -> new RequestActionPair(op.prepareRequest(transactionalBatchClient), op))
                .collect(TransactionalBatchRequestBody::new, accumulator, combiner);
        ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response =
            transactionalBatchImplementation
                .submitTransactionalBatchWithRestResponse(requestBody, null, contextValue);
        // parseResponse maps sub-responses back onto their requests and surfaces any failed action.
        Response<List<TableTransactionActionResponse>> parsedResponse = parseResponse(requestBody, response);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            new TableTransactionResult(transactionActions, parsedResponse.getValue()));
    };
    try {
        // Only dispatch to the shared pool when a timeout must be enforced; otherwise run inline.
        return hasTimeout(timeout)
            ? getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get();
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        // NOTE(review): the interrupt status is not restored on InterruptedException — confirm intended.
        throw logger.logExceptionAsError(new RuntimeException(ex));
    } catch (RuntimeException ex) {
        throw logger.logExceptionAsError((RuntimeException) TableUtils.interpretException(ex));
    }
}
/**
 * Immutable pairing of a prepared {@link HttpRequest} with the
 * {@link TransactionalBatchAction} that produced it, so both can travel
 * together through the batch-assembly pipeline.
 */
private static class RequestActionPair {
    private final HttpRequest preparedRequest;
    private final TransactionalBatchAction sourceAction;

    RequestActionPair(HttpRequest request, TransactionalBatchAction action) {
        this.preparedRequest = request;
        this.sourceAction = action;
    }

    public HttpRequest getRequest() {
        return preparedRequest;
    }

    public TransactionalBatchAction getAction() {
        return sourceAction;
    }
}
/**
 * Correlates each per-action sub-response of a transactional batch with the request that produced
 * it, and fails fast when any action came back with an error status.
 *
 * @param requestBody The batch request body that was submitted; used to recover the original
 * {@link TransactionalBatchChangeSet} so sub-responses can be linked back to their HTTP requests.
 * @param response The raw batch response whose value holds one sub-response per submitted action.
 * @return The list of sub-responses wrapped in a {@link SimpleResponse} when all actions succeeded.
 * @throws RuntimeException wrapping a {@link TableTransactionFailedException} when any sub-response
 * has a status code of 400 or above (the service rolls back the whole transaction in that case).
 */
private Response<List<TableTransactionActionResponse>> parseResponse(TransactionalBatchRequestBody requestBody,
    ResponseBase<TransactionalBatchSubmitBatchHeaders, TableTransactionActionResponse[]> response) {
    TableServiceError error = null;
    String errorMessage = null;
    TransactionalBatchChangeSet changes = null;
    TransactionalBatchAction failedAction = null;
    Integer failedIndex = null;
    // When present, the change set is the first element of the request body's contents.
    if (requestBody.getContents().get(0) instanceof TransactionalBatchChangeSet) {
        changes = (TransactionalBatchChangeSet) requestBody.getContents().get(0);
    }
    for (int i = 0; i < response.getValue().length; i++) {
        TableTransactionActionResponse subResponse = response.getValue()[i];
        // Attach the originating HTTP request to the sub-response for caller diagnostics.
        if (changes != null && changes.getContents().get(i) != null) {
            TableTransactionActionResponseAccessHelper.updateTableTransactionActionResponse(subResponse,
                changes.getContents().get(i).getHttpRequest());
        }
        // Only the first failing sub-response is captured; later failures are ignored.
        if (subResponse.getStatusCode() >= 400 && error == null && errorMessage == null) {
            if (subResponse.getValue() instanceof TableServiceError) {
                error = (TableServiceError) subResponse.getValue();
                if (changes != null && error.getOdataError() != null
                    && error.getOdataError().getMessage() != null
                    && error.getOdataError().getMessage().getValue() != null) {
                    String message = error.getOdataError().getMessage().getValue();
                    try {
                        // The error message appears to be prefixed with "<failedIndex>:"; parse it
                        // best-effort to identify which action failed — TODO confirm format.
                        failedIndex = Integer.parseInt(message.substring(0, message.indexOf(":")));
                        failedAction = changes.getContents().get(failedIndex).getOperation();
                    } catch (NumberFormatException e) {
                        // Best-effort only: an unparsable prefix leaves failedIndex/failedAction null.
                    }
                }
            } else if (subResponse.getValue() instanceof String) {
                errorMessage = "The service returned the following data for the failed operation: "
                    + subResponse.getValue();
            } else {
                errorMessage =
                    "The service returned the following status code for the failed operation: "
                        + subResponse.getStatusCode();
            }
        }
    }
    if (error != null || errorMessage != null) {
        String message = "An action within the operation failed, the transaction has been rolled back.";
        if (failedAction != null) {
            message += " The failed operation was: " + failedAction;
        } else if (errorMessage != null) {
            message += " " + errorMessage;
        }
        throw logger.logExceptionAsError(new RuntimeException(
            new TableTransactionFailedException(message, null, toTableServiceError(error), failedIndex)));
    } else {
        return new SimpleResponse<>(response, Arrays.asList(response.getValue()));
    }
}
} |
nit: why not do it the other way around? ```java if(playSourcesInternal.isEmpty()) { throw... } ... rest of code ... ``` | PlayRequest getPlayToAllRequest(PlayToAllOptions options) {
List<PlaySourceInternal> playSourcesInternal = new ArrayList<>();
for (PlaySource source : options.getPlaySources()) {
    PlaySourceInternal playSourceInternal = null;
    if (source instanceof FileSource) {
        playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) source);
    } else if (source instanceof TextSource) {
        playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) source);
    } else if (source instanceof SsmlSource) {
        playSourceInternal = getPlaySourceInternalFromSsmlSource((SsmlSource) source);
    }
    // Reject unknown source types (or conversions that produced no kind) immediately.
    if (playSourceInternal == null || playSourceInternal.getKind() == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException(source.getClass().getCanonicalName()));
    }
    playSourcesInternal.add(playSourceInternal);
}
// Guard clause (per review): fail fast when no sources were produced — this is only
// reachable when options.getPlaySources() is empty, since every iteration above
// either adds a source or throws.
if (playSourcesInternal.isEmpty()) {
    throw logger.logExceptionAsError(
        new IllegalArgumentException(options.getPlaySources().getClass().getCanonicalName()));
}
PlayRequest request = new PlayRequest()
    .setPlaySources(playSourcesInternal);
request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())
    .setInterruptCallMediaOperation(options.isInterruptCallMediaOperation()));
request.setOperationContext(options.getOperationContext());
request.setOperationCallbackUri(options.getOperationCallbackUrl());
return request;
} | if (!playSourcesInternal.isEmpty()) { | PlayRequest getPlayToAllRequest(PlayToAllOptions options) {
List<PlaySourceInternal> playSourcesInternal = new ArrayList<>();
for (PlaySource source : options.getPlaySources()) {
    PlaySourceInternal playSourceInternal = null;
    if (source instanceof FileSource) {
        playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) source);
    } else if (source instanceof TextSource) {
        playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) source);
    } else if (source instanceof SsmlSource) {
        playSourceInternal = getPlaySourceInternalFromSsmlSource((SsmlSource) source);
    }
    // Reject unknown source types (or conversions that produced no kind) immediately.
    if (playSourceInternal == null || playSourceInternal.getKind() == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException(source.getClass().getCanonicalName()));
    }
    playSourcesInternal.add(playSourceInternal);
}
// Guard clause (per review): fail fast when no sources were produced — this is only
// reachable when options.getPlaySources() is empty, since every iteration above
// either adds a source or throws.
if (playSourcesInternal.isEmpty()) {
    throw logger.logExceptionAsError(
        new IllegalArgumentException(options.getPlaySources().getClass().getCanonicalName()));
}
PlayRequest request = new PlayRequest()
    .setPlaySources(playSourcesInternal);
request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())
    .setInterruptCallMediaOperation(options.isInterruptCallMediaOperation()));
request.setOperationContext(options.getOperationContext());
request.setOperationCallbackUri(options.getOperationCallbackUrl());
return request;
} | class CallMediaAsync {
// Generated low-level media-operations client that performs the actual service calls.
private final CallMediasImpl contentsInternal;
// Id of the call connection that every media operation on this instance targets.
private final String callConnectionId;
// Logger used to surface argument and service errors to the caller.
private final ClientLogger logger;
/**
 * Creates a CallMediaAsync scoped to a single call connection.
 *
 * @param callConnectionId Id of the call connection all media operations will target.
 * @param contentsInternal Generated media-operations client used for the service calls.
 */
CallMediaAsync(String callConnectionId, CallMediasImpl contentsInternal) {
    this.callConnectionId = callConnectionId;
    this.contentsInternal = contentsInternal;
    this.logger = new ClientLogger(CallMediaAsync.class);
}
/**
 * Plays the given media sources to a specific set of participants in the call.
 *
 * @param playSources A List of {@link PlaySource} representing the sources to play.
 * @param playTo the targets to play to
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful play request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> play(List<PlaySource> playSources, List<CommunicationIdentifier> playTo) {
    return playWithResponse(new PlayOptions(playSources, playTo)).flatMap(FluxUtil::toMono);
}
/**
 * Plays a single media source to a specific set of participants in the call.
 *
 * @param playSource A {@link PlaySource} representing the source to play.
 * @param playTo the targets to play to
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful play request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) {
    return playWithResponse(new PlayOptions(playSource, playTo)).flatMap(FluxUtil::toMono);
}
/**
 * Plays the given media sources to every participant in the call.
 *
 * @param playSources A List of {@link PlaySource} representing the sources to play.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful playAll request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> playToAll(List<PlaySource> playSources) {
    return playToAllWithResponse(new PlayToAllOptions(playSources)).flatMap(FluxUtil::toMono);
}
/**
 * Plays a single media source to every participant in the call.
 *
 * @param playSource A {@link PlaySource} representing the source to play.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful playAll request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> playToAll(PlaySource playSource) {
    return playToAllWithResponse(new PlayToAllOptions(playSource)).flatMap(FluxUtil::toMono);
}
/**
 * Play
 *
 * @param options play options.
 * @return Response for successful play request.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> playWithResponse(PlayOptions options) {
    // A null context lets the internal overload resolve one via withContext at subscription time.
    return playWithResponseInternal(options, null);
}
/**
 * Play to all participants
 *
 * @param options play to all options.
 * @return Response for successful playAll request.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> playToAllWithResponse(PlayToAllOptions options) {
    // A null context lets the internal overload resolve one via withContext at subscription time.
    return playToAllWithResponseInternal(options, null);
}
/**
 * Recognize operation.
 *
 * @param recognizeOptions Different attributes for recognize.
 * @return Response for successful recognize request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startRecognizing(CallMediaRecognizeOptions recognizeOptions) {
    // Drop the response wrapper; callers only observe completion.
    return startRecognizingWithResponse(recognizeOptions).then();
}
/**
 * Recognize operation
 *
 * @param recognizeOptions Different attributes for recognize.
 * @return Response for successful recognize request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startRecognizingWithResponse(CallMediaRecognizeOptions recognizeOptions) {
    return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context));
}
/**
 * Builds the recognize request matching the concrete options type and submits it.
 * Unsupported option types surface as an {@link UnsupportedOperationException} error signal.
 */
Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) {
    try {
        Context effectiveContext = context == null ? Context.NONE : context;
        RecognizeRequest recognizeRequest;
        // Dispatch on the concrete options subtype to build the matching wire request.
        if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) {
            recognizeRequest = getRecognizeRequestFromDtmfConfiguration(recognizeOptions);
        } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) {
            recognizeRequest = getRecognizeRequestFromChoiceConfiguration(recognizeOptions);
        } else if (recognizeOptions instanceof CallMediaRecognizeSpeechOptions) {
            recognizeRequest = getRecognizeRequestFromSpeechConfiguration(recognizeOptions);
        } else if (recognizeOptions instanceof CallMediaRecognizeSpeechOrDtmfOptions) {
            recognizeRequest = getRecognizeRequestFromSpeechOrDtmfConfiguration(recognizeOptions);
        } else {
            return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName()));
        }
        return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, effectiveContext);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Cancels all the queued media operations.
 * @return Void
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> cancelAllMediaOperations() {
    // Drop the response wrapper; callers only observe completion.
    return cancelAllMediaOperationsWithResponse().then();
}
/**
 * Cancels all the queued media operations
 * @return Response for successful playAll request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() {
    // A null context lets the internal overload resolve one via withContext at subscription time.
    return cancelAllMediaOperationsWithResponseInternal(null);
}
// Submits the cancel-all request, preferring the caller-supplied context over Reactor's.
Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) {
    try {
        return withContext(reactorContext -> contentsInternal.cancelAllMediaOperationsWithResponseAsync(
            callConnectionId, context == null ? reactorContext : context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Builds the play request lazily (at subscription time) and submits it with the
// caller-supplied context, falling back to Reactor's context when none was given.
Mono<Response<Void>> playWithResponseInternal(PlayOptions options, Context context) {
    try {
        return withContext(reactorContext -> contentsInternal.playWithResponseAsync(
            callConnectionId, getPlayRequest(options), context == null ? reactorContext : context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Submits a play-to-all request. Note: a previous revision built an unused defensive
// copy of the options here; it was dead code (getPlayToAllRequest consumed the
// original options) and has been removed.
Mono<Response<Void>> playToAllWithResponseInternal(PlayToAllOptions options, Context context) {
    try {
        return withContext(contextValue -> {
            contextValue = context == null ? contextValue : context;
            // Build the wire request lazily, at subscription time, matching playWithResponseInternal.
            PlayRequest request = getPlayToAllRequest(options);
            return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue);
        });
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Converts {@link PlayOptions} into the wire-format {@link PlayRequest}.
 *
 * @param options The caller-supplied play options; must yield at least one convertible source.
 * @return The populated request.
 * @throws IllegalArgumentException when a source has an unsupported type or no sources are present.
 */
PlayRequest getPlayRequest(PlayOptions options) {
    List<PlaySourceInternal> playSourcesInternal = new ArrayList<>();
    for (PlaySource source : options.getPlaySources()) {
        PlaySourceInternal playSourceInternal = null;
        if (source instanceof FileSource) {
            playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) source);
        } else if (source instanceof TextSource) {
            playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) source);
        } else if (source instanceof SsmlSource) {
            playSourceInternal = getPlaySourceInternalFromSsmlSource((SsmlSource) source);
        }
        // Reject unknown source types (or conversions that produced no kind) immediately.
        if (playSourceInternal == null || playSourceInternal.getKind() == null) {
            throw logger.logExceptionAsError(new IllegalArgumentException(source.getClass().getCanonicalName()));
        }
        playSourcesInternal.add(playSourceInternal);
    }
    // Guard clause: fail fast when no sources were produced (only reachable when
    // options.getPlaySources() is empty — every loop iteration adds or throws).
    if (playSourcesInternal.isEmpty()) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException(options.getPlaySources().getClass().getCanonicalName()));
    }
    PlayRequest request = new PlayRequest()
        .setPlaySources(playSourcesInternal)
        .setPlayTo(
            options.getPlayTo()
                .stream()
                .map(CommunicationIdentifierConverter::convert)
                .collect(Collectors.toList()));
    request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop()));
    request.setOperationContext(options.getOperationContext());
    request.setOperationCallbackUri(options.getOperationCallbackUrl());
    return request;
}
// Maps a public FileSource onto its internal wire representation.
private PlaySourceInternal getPlaySourceInternalFromFileSource(FileSource playSource) {
    PlaySourceInternal internal = new PlaySourceInternal();
    internal.setKind(PlaySourceTypeInternal.FILE);
    internal.setFile(new FileSourceInternal().setUri(playSource.getUrl()));
    internal.setPlaySourceCacheId(playSource.getPlaySourceCacheId());
    return internal;
}
// Maps a public TextSource onto its internal wire representation; optional
// fields are copied only when present.
private PlaySourceInternal getPlaySourceInternalFromTextSource(TextSource playSource) {
    TextSourceInternal textInternal = new TextSourceInternal().setText(playSource.getText());
    if (playSource.getVoiceKind() != null) {
        textInternal.setVoiceKind(VoiceKindInternal.fromString(playSource.getVoiceKind().toString()));
    }
    if (playSource.getSourceLocale() != null) {
        textInternal.setSourceLocale(playSource.getSourceLocale());
    }
    if (playSource.getVoiceName() != null) {
        textInternal.setVoiceName(playSource.getVoiceName());
    }
    if (playSource.getCustomVoiceEndpointId() != null) {
        textInternal.setCustomVoiceEndpointId(playSource.getCustomVoiceEndpointId());
    }
    PlaySourceInternal internal = new PlaySourceInternal();
    internal.setKind(PlaySourceTypeInternal.TEXT);
    internal.setText(textInternal);
    internal.setPlaySourceCacheId(playSource.getPlaySourceCacheId());
    return internal;
}
// Maps a public SsmlSource onto its internal wire representation.
private PlaySourceInternal getPlaySourceInternalFromSsmlSource(SsmlSource playSource) {
    SsmlSourceInternal ssmlInternal = new SsmlSourceInternal().setSsmlText(playSource.getSsmlText());
    if (playSource.getCustomVoiceEndpointId() != null) {
        ssmlInternal.setCustomVoiceEndpointId(playSource.getCustomVoiceEndpointId());
    }
    PlaySourceInternal internal = new PlaySourceInternal();
    internal.setKind(PlaySourceTypeInternal.SSML);
    internal.setSsml(ssmlInternal);
    internal.setPlaySourceCacheId(playSource.getPlaySourceCacheId());
    return internal;
}
// Dispatches a public PlaySource to the matching converter. Unknown subtypes
// yield an empty PlaySourceInternal, preserving the historical behavior.
private PlaySourceInternal convertPlaySourceToPlaySourceInternal(PlaySource playSource) {
    if (playSource instanceof FileSource) {
        return getPlaySourceInternalFromFileSource((FileSource) playSource);
    }
    if (playSource instanceof TextSource) {
        return getPlaySourceInternalFromTextSource((TextSource) playSource);
    }
    if (playSource instanceof SsmlSource) {
        return getPlaySourceInternalFromSsmlSource((SsmlSource) playSource);
    }
    return new PlaySourceInternal();
}
// Converts each public RecognitionChoice to its internal wire form.
private List<RecognitionChoiceInternal> convertListRecognitionChoiceInternal(List<RecognitionChoice> recognitionChoices) {
    List<RecognitionChoiceInternal> converted = new ArrayList<>(recognitionChoices.size());
    for (RecognitionChoice choice : recognitionChoices) {
        converted.add(convertRecognitionChoiceInternal(choice));
    }
    return converted;
}
// Copies the populated fields of a public RecognitionChoice into its wire form;
// null fields are simply left unset.
private RecognitionChoiceInternal convertRecognitionChoiceInternal(RecognitionChoice recognitionChoice) {
    RecognitionChoiceInternal converted = new RecognitionChoiceInternal();
    if (recognitionChoice.getLabel() != null) {
        converted.setLabel(recognitionChoice.getLabel());
    }
    if (recognitionChoice.getPhrases() != null) {
        converted.setPhrases(recognitionChoice.getPhrases());
    }
    if (recognitionChoice.getTone() != null) {
        converted.setTone(convertDtmfToneInternal(recognitionChoice.getTone()));
    }
    return converted;
}
// Converts a public DtmfTone to its internal equivalent via its string value.
private DtmfToneInternal convertDtmfToneInternal(DtmfTone dtmfTone) {
    return DtmfToneInternal.fromString(dtmfTone.toString());
}
// Builds the wire-format RecognizeRequest for a DTMF recognition operation.
private RecognizeRequest getRecognizeRequestFromDtmfConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    // Caller (recognizeWithResponseInternal) guarantees the concrete type via instanceof.
    CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions;
    DtmfOptionsInternal dtmfOptionsInternal = getDtmfOptionsInternal(
        dtmfRecognizeOptions.getInterToneTimeout(),
        dtmfRecognizeOptions.getMaxTonesToCollect(),
        dtmfRecognizeOptions.getStopTones()
    );
    RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal()
        .setDtmfOptions(dtmfOptionsInternal)
        .setInterruptPrompt(recognizeOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant()));
    // Timeout is sent in whole seconds; sub-second precision is truncated.
    recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds());
    // Optional prompt to play before recognition starts; may be null.
    PlaySourceInternal playSourceInternal = getPlaySourceInternalFromRecognizeOptions(recognizeOptions);
    RecognizeRequest recognizeRequest = new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(playSourceInternal)
        .setRecognizeOptions(recognizeOptionsInternal)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
    return recognizeRequest;
}
// Builds the wire-format RecognizeRequest for a choice-based recognition operation.
private RecognizeRequest getRecognizeRequestFromChoiceConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    // Caller guarantees the concrete type via instanceof.
    CallMediaRecognizeChoiceOptions choiceOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions;
    RecognizeOptionsInternal internalOptions = new RecognizeOptionsInternal()
        .setChoices(convertListRecognitionChoiceInternal(choiceOptions.getChoices()))
        .setInterruptPrompt(choiceOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceOptions.getTargetParticipant()));
    internalOptions.setInitialSilenceTimeoutInSeconds((int) choiceOptions.getInitialSilenceTimeout().getSeconds());
    // Optional speech settings are forwarded only when non-null and non-empty.
    String speechLanguage = choiceOptions.getSpeechLanguage();
    if (speechLanguage != null && !speechLanguage.isEmpty()) {
        internalOptions.setSpeechLanguage(speechLanguage);
    }
    String endpointId = choiceOptions.getSpeechRecognitionModelEndpointId();
    if (endpointId != null && !endpointId.isEmpty()) {
        internalOptions.setSpeechRecognitionModelEndpointId(endpointId);
    }
    return new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(choiceOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(getPlaySourceInternalFromRecognizeOptions(recognizeOptions))
        .setRecognizeOptions(internalOptions)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
}
// Builds the wire-format RecognizeRequest for a speech recognition operation.
private RecognizeRequest getRecognizeRequestFromSpeechConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    // Caller guarantees the concrete type via instanceof.
    CallMediaRecognizeSpeechOptions speechOptions = (CallMediaRecognizeSpeechOptions) recognizeOptions;
    SpeechOptionsInternal speechInternal =
        new SpeechOptionsInternal().setEndSilenceTimeoutInMs(speechOptions.getEndSilenceTimeout().toMillis());
    RecognizeOptionsInternal internalOptions = new RecognizeOptionsInternal()
        .setSpeechOptions(speechInternal)
        .setInterruptPrompt(speechOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(speechOptions.getTargetParticipant()));
    internalOptions.setInitialSilenceTimeoutInSeconds((int) speechOptions.getInitialSilenceTimeout().getSeconds());
    // Optional speech settings are forwarded only when non-null and non-empty.
    String speechLanguage = speechOptions.getSpeechLanguage();
    if (speechLanguage != null && !speechLanguage.isEmpty()) {
        internalOptions.setSpeechLanguage(speechLanguage);
    }
    String endpointId = speechOptions.getSpeechRecognitionModelEndpointId();
    if (endpointId != null && !endpointId.isEmpty()) {
        internalOptions.setSpeechRecognitionModelEndpointId(endpointId);
    }
    return new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(speechOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(speechOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(getPlaySourceInternalFromRecognizeOptions(recognizeOptions))
        .setRecognizeOptions(internalOptions)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
}
// Builds the wire-format RecognizeRequest for a combined speech-or-DTMF recognition operation.
private RecognizeRequest getRecognizeRequestFromSpeechOrDtmfConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    // Caller guarantees the concrete type via instanceof.
    CallMediaRecognizeSpeechOrDtmfOptions combinedOptions = (CallMediaRecognizeSpeechOrDtmfOptions) recognizeOptions;
    DtmfOptionsInternal dtmfInternal = getDtmfOptionsInternal(
        combinedOptions.getInterToneTimeout(),
        combinedOptions.getMaxTonesToCollect(),
        combinedOptions.getStopTones()
    );
    SpeechOptionsInternal speechInternal =
        new SpeechOptionsInternal().setEndSilenceTimeoutInMs(combinedOptions.getEndSilenceTimeout().toMillis());
    RecognizeOptionsInternal internalOptions = new RecognizeOptionsInternal()
        .setSpeechOptions(speechInternal)
        .setDtmfOptions(dtmfInternal)
        .setInterruptPrompt(combinedOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(combinedOptions.getTargetParticipant()));
    internalOptions.setInitialSilenceTimeoutInSeconds((int) combinedOptions.getInitialSilenceTimeout().getSeconds());
    // Optional speech settings are forwarded only when non-null and non-empty.
    String speechLanguage = combinedOptions.getSpeechLanguage();
    if (speechLanguage != null && !speechLanguage.isEmpty()) {
        internalOptions.setSpeechLanguage(speechLanguage);
    }
    String endpointId = combinedOptions.getSpeechRecognitionModelEndpointId();
    if (endpointId != null && !endpointId.isEmpty()) {
        internalOptions.setSpeechRecognitionModelEndpointId(endpointId);
    }
    return new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(combinedOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(combinedOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(getPlaySourceInternalFromRecognizeOptions(recognizeOptions))
        .setRecognizeOptions(internalOptions)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
}
// Assembles the internal DTMF options; optional settings are copied only when present.
private DtmfOptionsInternal getDtmfOptionsInternal(Duration interToneTimeout, Integer maxTonesToCollect, List<DtmfTone> stopTones) {
    DtmfOptionsInternal dtmfOptionsInternal = new DtmfOptionsInternal();
    // Timeout is sent in whole seconds; sub-second precision is truncated.
    dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) interToneTimeout.getSeconds());
    if (maxTonesToCollect != null) {
        dtmfOptionsInternal.setMaxTonesToCollect(maxTonesToCollect);
    }
    if (stopTones != null) {
        List<DtmfToneInternal> converted = new ArrayList<>(stopTones.size());
        for (DtmfTone tone : stopTones) {
            converted.add(convertDtmfToneInternal(tone));
        }
        dtmfOptionsInternal.setStopTones(converted);
    }
    return dtmfOptionsInternal;
}
// Converts the optional play prompt of recognize options; returns null when no prompt is set.
private PlaySourceInternal getPlaySourceInternalFromRecognizeOptions(CallMediaRecognizeOptions recognizeOptions) {
    PlaySource prompt = recognizeOptions.getPlayPrompt();
    return prompt == null ? null : convertPlaySourceToPlaySourceInternal(prompt);
}
/**
 * Send DTMF tones
 *
 * @param tones tones to be sent
 * @param targetParticipant the target participant
 * @return Response for successful sendDtmfTones request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SendDtmfTonesResult> sendDtmfTones(List<DtmfTone> tones, CommunicationIdentifier targetParticipant) {
    return sendDtmfTonesWithResponse(new SendDtmfTonesOptions(tones, targetParticipant)).flatMap(FluxUtil::toMono);
}
/**
 * Send DTMF tones
 *
 * @param options SendDtmfTones configuration options
 * @return Response for successful sendDtmfTones request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SendDtmfTonesResult>> sendDtmfTonesWithResponse(SendDtmfTonesOptions options) {
    return withContext(context -> sendDtmfTonesWithResponseInternal(options, context));
}
// Builds and submits the send-DTMF request; the random UUID and current timestamp
// are passed per call, matching the generated client's signature.
Mono<Response<SendDtmfTonesResult>> sendDtmfTonesWithResponseInternal(SendDtmfTonesOptions options, Context context) {
    try {
        Context effectiveContext = context == null ? Context.NONE : context;
        List<DtmfToneInternal> tones = new ArrayList<>(options.getTones().size());
        for (DtmfTone tone : options.getTones()) {
            tones.add(convertDtmfToneInternal(tone));
        }
        SendDtmfTonesRequestInternal requestInternal = new SendDtmfTonesRequestInternal()
            .setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
            .setTones(tones)
            .setOperationContext(options.getOperationContext())
            .setOperationCallbackUri(options.getOperationCallbackUrl());
        return contentsInternal.sendDtmfTonesWithResponseAsync(
                callConnectionId,
                requestInternal,
                UUID.randomUUID(),
                OffsetDateTime.now(),
                effectiveContext)
            .map(response -> new SimpleResponse<>(response,
                SendDtmfTonesResponseConstructorProxy.create(response.getValue())));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Starts continuous Dtmf recognition.
 * @param targetParticipant the target participant
 * @return void
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startContinuousDtmfRecognition(CommunicationIdentifier targetParticipant) {
    return startContinuousDtmfRecognitionWithResponse(new ContinuousDtmfRecognitionOptions(targetParticipant)).then();
}
/**
 * Starts continuous Dtmf recognition.
 * @param options ContinuousDtmfRecognition configuration options
 * @return Response for successful start continuous dtmf recognition request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startContinuousDtmfRecognitionWithResponse(ContinuousDtmfRecognitionOptions options) {
    return withContext(context -> startContinuousDtmfRecognitionWithResponseInternal(options, context));
}
// Builds and submits the start-continuous-DTMF-recognition request.
Mono<Response<Void>> startContinuousDtmfRecognitionWithResponseInternal(ContinuousDtmfRecognitionOptions options, Context context) {
    try {
        Context effectiveContext = context == null ? Context.NONE : context;
        ContinuousDtmfRecognitionRequestInternal request = new ContinuousDtmfRecognitionRequestInternal()
            .setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
            .setOperationContext(options.getOperationContext());
        return contentsInternal.startContinuousDtmfRecognitionWithResponseAsync(callConnectionId, request, effectiveContext);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Stops continuous Dtmf recognition.
 * @param targetParticipant the target participant
 * @return void
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> stopContinuousDtmfRecognition(CommunicationIdentifier targetParticipant) {
    return stopContinuousDtmfRecognitionWithResponse(new ContinuousDtmfRecognitionOptions(targetParticipant)).then();
}
/**
 * Stops continuous Dtmf recognition.
 * @param options ContinuousDtmfRecognition configuration options
 * @return Response for successful stop continuous dtmf recognition request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> stopContinuousDtmfRecognitionWithResponse(ContinuousDtmfRecognitionOptions options) {
    return withContext(context -> stopContinuousDtmfRecognitionWithResponseInternal(options, context));
}
// Builds and submits the stop-continuous-DTMF-recognition request.
Mono<Response<Void>> stopContinuousDtmfRecognitionWithResponseInternal(ContinuousDtmfRecognitionOptions options, Context context) {
    try {
        Context effectiveContext = context == null ? Context.NONE : context;
        ContinuousDtmfRecognitionRequestInternal request = new ContinuousDtmfRecognitionRequestInternal()
            .setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
            .setOperationContext(options.getOperationContext())
            .setOperationCallbackUri(options.getOperationCallbackUrl());
        return contentsInternal.stopContinuousDtmfRecognitionWithResponseAsync(callConnectionId, request, effectiveContext);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Places a participant on hold while playing the supplied audio.
 * @param targetParticipant the target.
 * @param playSourceInfo audio to play.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startHoldMusic(CommunicationIdentifier targetParticipant,
                                 PlaySource playSourceInfo) {
    StartHoldMusicOptions options = new StartHoldMusicOptions(targetParticipant, playSourceInfo);
    return startHoldMusicWithResponseInternal(options, Context.NONE).then();
}
/**
 * Holds participant in call.
 * @param options - Different options to pass to the request.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startHoldMusicWithResponse(StartHoldMusicOptions options) {
    // Resolve a context from the Reactor subscriber chain before delegating.
    return withContext(context -> startHoldMusicWithResponseInternal(
        options, context));
}
// Builds and submits the start-hold-music request.
Mono<Response<Void>> startHoldMusicWithResponseInternal(StartHoldMusicOptions options, Context context) {
    try {
        Context effectiveContext = context == null ? Context.NONE : context;
        StartHoldMusicRequestInternal holdRequest = new StartHoldMusicRequestInternal()
            .setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
            .setPlaySourceInfo(convertPlaySourceToPlaySourceInternal(options.getPlaySourceInfo()))
            .setLoop(options.isLoop())
            .setOperationContext(options.getOperationContext());
        return contentsInternal.startHoldMusicWithResponseAsync(callConnectionId, holdRequest, effectiveContext);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Removes hold from participant in call.
 * @param targetParticipant the target.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> stopHoldMusic(CommunicationIdentifier targetParticipant) {
    // null operationContext: no caller-supplied correlation value is sent.
    return stopHoldMusicWithResponse(targetParticipant, null).then();
}
/**
* Holds participant in call.
* @param targetParticipant the target.
* @param operationContext Operational context.
* @return Response for successful operation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> stopHoldMusicWithResponse(CommunicationIdentifier targetParticipant,
String operationContext) {
return withContext(context -> stopHoldMusicWithResponseInternal(targetParticipant, operationContext, context));
}
Mono<Response<Void>> stopHoldMusicWithResponseInternal(CommunicationIdentifier targetParticipant,
String operationContext,
Context context) {
try {
context = context == null ? Context.NONE : context;
StopHoldMusicRequestInternal request = new StopHoldMusicRequestInternal()
.setTargetParticipant(CommunicationIdentifierConverter.convert(targetParticipant))
.setOperationContext(operationContext);
return contentsInternal
.stopHoldMusicWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
 * Starts transcription in the call with default settings.
 *
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startTranscription() {
return startTranscriptionWithResponseAsync(null).then();
}
/**
 * Starts transcription in the call with options.
 *
 * @param options Options for the Start Transcription operation; may be null for defaults.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startTranscriptionWithResponseAsync(StartTranscriptionOptions options) {
return withContext(context -> startTranscriptionWithResponseInternal(options, context));
}
// Builds the StartTranscription request; a null options argument sends an empty request body.
Mono<Response<Void>> startTranscriptionWithResponseInternal(StartTranscriptionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
StartTranscriptionRequestInternal request = new StartTranscriptionRequestInternal();
if (options != null) {
request.setLocale(options.getLocale());
request.setOperationContext(options.getOperationContext());
}
return contentsInternal
.startTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
// Route synchronous failures into the returned Mono.
return monoError(logger, ex);
}
}
/**
 * Stops transcription in the call.
 *
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> stopTranscription() {
return stopTranscriptionWithResponseAsync(null).then();
}
/**
 * Stops transcription in the call with options.
 *
 * @param options Options for the Stop Transcription operation; may be null.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> stopTranscriptionWithResponseAsync(StopTranscriptionOptions options) {
return withContext(context -> stopTranscriptionWithResponseInternal(options, context));
}
// Builds the StopTranscription request; a null options argument sends an empty request body.
Mono<Response<Void>> stopTranscriptionWithResponseInternal(StopTranscriptionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
StopTranscriptionRequestInternal request = new StopTranscriptionRequestInternal();
if (options != null) {
request.setOperationContext(options.getOperationContext());
}
return contentsInternal
.stopTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
 * Updates the transcription language for an in-progress transcription.
 *
 * @param locale Defines new locale for transcription.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> updateTranscription(String locale) {
return withContext(context -> updateTranscriptionWithResponseInternal(locale, context)).then();
}
// Builds the UpdateTranscription request carrying only the new locale.
Mono<Response<Void>> updateTranscriptionWithResponseInternal(String locale, Context context) {
try {
context = context == null ? Context.NONE : context;
UpdateTranscriptionRequestInternal request = new UpdateTranscriptionRequestInternal();
request.setLocale(locale);
return contentsInternal
.updateTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} | class CallMediaAsync {
private final CallMediasImpl contentsInternal;
private final String callConnectionId;
private final ClientLogger logger;
// Package-private constructor — presumably instances are handed out by the call-connection
// client rather than created by users directly (TODO confirm against the enclosing package).
CallMediaAsync(String callConnectionId, CallMediasImpl contentsInternal) {
this.callConnectionId = callConnectionId;
this.contentsInternal = contentsInternal;
this.logger = new ClientLogger(CallMediaAsync.class);
}
/**
 * Plays the provided sources to the specified participants.
 *
 * @param playSources A List of {@link PlaySource} representing the sources to play.
 * @param playTo the targets to play to
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful play request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> play(List<PlaySource> playSources, List<CommunicationIdentifier> playTo) {
PlayOptions options = new PlayOptions(playSources, playTo);
return playWithResponse(options).flatMap(FluxUtil::toMono);
}
/**
 * Plays a single source to the specified participants.
 *
 * @param playSource A {@link PlaySource} representing the source to play.
 * @param playTo the targets to play to
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful play request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) {
PlayOptions options = new PlayOptions(playSource, playTo);
return playWithResponse(options).flatMap(FluxUtil::toMono);
}
/**
 * Plays the provided sources to all participants in the call.
 *
 * @param playSources A List of {@link PlaySource} representing the sources to play.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful playAll request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> playToAll(List<PlaySource> playSources) {
PlayToAllOptions options = new PlayToAllOptions(playSources);
return playToAllWithResponse(options).flatMap(FluxUtil::toMono);
}
/**
 * Plays a single source to all participants in the call.
 *
 * @param playSource A {@link PlaySource} representing the source to play.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return Void for successful playAll request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> playToAll(PlaySource playSource) {
PlayToAllOptions options = new PlayToAllOptions(playSource);
return playToAllWithResponse(options).flatMap(FluxUtil::toMono);
}
/**
 * Plays media to specific participants, exposing the full HTTP response.
 *
 * @param options play options.
 * @return Response for successful play request.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> playWithResponse(PlayOptions options) {
// A null context tells the internal method to pick up the reactor subscriber context.
return playWithResponseInternal(options, null);
}
/**
 * Plays media to all participants, exposing the full HTTP response.
 *
 * @param options play to all options.
 * @return Response for successful playAll request.
 * @throws HttpResponseException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> playToAllWithResponse(PlayToAllOptions options) {
// A null context tells the internal method to pick up the reactor subscriber context.
return playToAllWithResponseInternal(options, null);
}
/**
 * Recognize operation.
 * @param recognizeOptions Different attributes for recognize.
 * @return Response for successful recognize request.
 */
// Added @ServiceMethod for consistency — every other public service method in this class carries it.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startRecognizing(CallMediaRecognizeOptions recognizeOptions) {
    return startRecognizingWithResponse(recognizeOptions).then();
}
/**
 * Recognize operation, exposing the full HTTP response.
 * @param recognizeOptions Different attributes for recognize.
 * @return Response for successful recognize request.
 */
// Added @ServiceMethod for consistency — every other public service method in this class carries it.
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startRecognizingWithResponse(CallMediaRecognizeOptions recognizeOptions) {
    return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context));
}
// Dispatches on the concrete recognize-options subtype (DTMF, choice, speech, speech-or-DTMF)
// to build the matching wire request; unknown subtypes fail with UnsupportedOperationException.
Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) {
try {
context = context == null ? Context.NONE : context;
if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) {
RecognizeRequest recognizeRequest = getRecognizeRequestFromDtmfConfiguration(recognizeOptions);
return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context);
} else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) {
RecognizeRequest recognizeRequest = getRecognizeRequestFromChoiceConfiguration(recognizeOptions);
return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context);
} else if (recognizeOptions instanceof CallMediaRecognizeSpeechOptions) {
RecognizeRequest recognizeRequest = getRecognizeRequestFromSpeechConfiguration(recognizeOptions);
return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context);
} else if (recognizeOptions instanceof CallMediaRecognizeSpeechOrDtmfOptions) {
RecognizeRequest recognizeRequest = getRecognizeRequestFromSpeechOrDtmfConfiguration(recognizeOptions);
return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context);
} else {
// Unknown subtype: report it by class name through the returned Mono.
return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName()));
}
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
/**
 * Cancels all the queued media operations.
 * @return Void
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> cancelAllMediaOperations() {
return cancelAllMediaOperationsWithResponse().then();
}
/**
 * Cancels all the queued media operations.
 * @return Response for successful cancel request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() {
return cancelAllMediaOperationsWithResponseInternal(null);
}
// Cancels queued media operations; a null caller context falls back to the subscriber context.
Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) {
try {
return withContext(contextValue -> {
// Prefer an explicitly supplied context over the reactor-provided one.
contextValue = context == null ? contextValue : context;
return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue);
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
// Builds and dispatches a targeted play request; a null caller context falls back to the
// reactor subscriber context provided by withContext.
Mono<Response<Void>> playWithResponseInternal(PlayOptions options, Context context) {
try {
return withContext(contextValue -> {
contextValue = context == null ? contextValue : context;
PlayRequest request = getPlayRequest(options);
return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue);
});
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
// Builds and dispatches a play-to-all request; a null caller context falls back to the
// reactor subscriber context provided by withContext.
// Fix: the previous implementation constructed and fully populated a defensive copy of the
// options ("playOptions") that was never used — getPlayToAllRequest received the original
// anyway. The dead copy has been removed; behavior is unchanged.
Mono<Response<Void>> playToAllWithResponseInternal(PlayToAllOptions options, Context context) {
    try {
        return withContext(contextValue -> {
            contextValue = context == null ? contextValue : context;
            PlayRequest request = getPlayToAllRequest(options);
            return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue);
        });
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Converts public PlayOptions into the generated PlayRequest wire model.
// Throws IllegalArgumentException for an unsupported source subtype or an empty source list.
PlayRequest getPlayRequest(PlayOptions options) {
List<PlaySourceInternal> playSourcesInternal = new ArrayList<>();
for (PlaySource source: options.getPlaySources()) {
PlaySourceInternal playSourceInternal = null;
if (source instanceof FileSource) {
playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) source);
} else if (source instanceof TextSource) {
playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) source);
} else if (source instanceof SsmlSource) {
playSourceInternal = getPlaySourceInternalFromSsmlSource((SsmlSource) source);
}
// Reject any source that did not convert to a kinded internal source.
if (playSourceInternal != null && playSourceInternal.getKind() != null) {
playSourcesInternal.add(playSourceInternal);
} else {
throw logger.logExceptionAsError(new IllegalArgumentException(source.getClass().getCanonicalName()));
}
}
if (!playSourcesInternal.isEmpty()) {
PlayRequest request = new PlayRequest()
.setPlaySources(playSourcesInternal)
.setPlayTo(
options.getPlayTo()
.stream()
.map(CommunicationIdentifierConverter::convert)
.collect(Collectors.toList()));
request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop()));
request.setOperationContext(options.getOperationContext());
request.setOperationCallbackUri(options.getOperationCallbackUrl());
return request;
}
// Only reachable when getPlaySources() is empty; the exception message carries the list's
// class name, which is not very informative — NOTE(review): consider a clearer message.
throw logger.logExceptionAsError(new IllegalArgumentException(options.getPlaySources().getClass().getCanonicalName()));
}
// Maps a public FileSource to its generated wire model.
private PlaySourceInternal getPlaySourceInternalFromFileSource(FileSource playSource) {
FileSourceInternal fileSourceInternal = new FileSourceInternal().setUri(playSource.getUrl());
return new PlaySourceInternal()
.setKind(PlaySourceTypeInternal.FILE)
.setFile(fileSourceInternal)
.setPlaySourceCacheId(playSource.getPlaySourceCacheId());
}
// Maps a public TextSource to its generated wire model; optional voice settings are copied
// only when present.
private PlaySourceInternal getPlaySourceInternalFromTextSource(TextSource playSource) {
TextSourceInternal textSourceInternal = new TextSourceInternal().setText(playSource.getText());
if (playSource.getVoiceKind() != null) {
textSourceInternal.setVoiceKind(VoiceKindInternal.fromString(playSource.getVoiceKind().toString()));
}
if (playSource.getSourceLocale() != null) {
textSourceInternal.setSourceLocale(playSource.getSourceLocale());
}
if (playSource.getVoiceName() != null) {
textSourceInternal.setVoiceName(playSource.getVoiceName());
}
if (playSource.getCustomVoiceEndpointId() != null) {
textSourceInternal.setCustomVoiceEndpointId(playSource.getCustomVoiceEndpointId());
}
return new PlaySourceInternal()
.setKind(PlaySourceTypeInternal.TEXT)
.setText(textSourceInternal)
.setPlaySourceCacheId(playSource.getPlaySourceCacheId());
}
// Maps a public SsmlSource to its generated wire model.
private PlaySourceInternal getPlaySourceInternalFromSsmlSource(SsmlSource playSource) {
SsmlSourceInternal ssmlSourceInternal = new SsmlSourceInternal().setSsmlText(playSource.getSsmlText());
if (playSource.getCustomVoiceEndpointId() != null) {
ssmlSourceInternal.setCustomVoiceEndpointId(playSource.getCustomVoiceEndpointId());
}
return new PlaySourceInternal()
.setKind(PlaySourceTypeInternal.SSML)
.setSsml(ssmlSourceInternal)
.setPlaySourceCacheId(playSource.getPlaySourceCacheId());
}
// Dispatches on the concrete PlaySource subtype to the matching converter.
// NOTE(review): for an unrecognized subtype this returns an empty PlaySourceInternal (no kind
// set) instead of throwing, unlike getPlayRequest which rejects unknown sources — confirm
// this asymmetry is intentional before changing it.
private PlaySourceInternal convertPlaySourceToPlaySourceInternal(PlaySource playSource) {
PlaySourceInternal playSourceInternal = new PlaySourceInternal();
if (playSource instanceof FileSource) {
playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource);
} else if (playSource instanceof TextSource) {
playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource);
} else if (playSource instanceof SsmlSource) {
playSourceInternal = getPlaySourceInternalFromSsmlSource((SsmlSource) playSource);
}
return playSourceInternal;
}
// Converts a list of public RecognitionChoice models to their wire representations.
private List<RecognitionChoiceInternal> convertListRecognitionChoiceInternal(List<RecognitionChoice> recognitionChoices) {
return recognitionChoices.stream()
.map(this::convertRecognitionChoiceInternal)
.collect(Collectors.toList());
}
// Converts one RecognitionChoice, copying only the fields that are present.
private RecognitionChoiceInternal convertRecognitionChoiceInternal(RecognitionChoice recognitionChoice) {
RecognitionChoiceInternal internalRecognitionChoice = new RecognitionChoiceInternal();
if (recognitionChoice.getLabel() != null) {
internalRecognitionChoice.setLabel(recognitionChoice.getLabel());
}
if (recognitionChoice.getPhrases() != null) {
internalRecognitionChoice.setPhrases(recognitionChoice.getPhrases());
}
if (recognitionChoice.getTone() != null) {
internalRecognitionChoice.setTone(convertDtmfToneInternal(recognitionChoice.getTone()));
}
return internalRecognitionChoice;
}
// Maps a public DtmfTone to the generated expandable-enum equivalent via its string value.
private DtmfToneInternal convertDtmfToneInternal(DtmfTone dtmfTone) {
return DtmfToneInternal.fromString(dtmfTone.toString());
}
// Translates DTMF-based recognize options into the generated RecognizeRequest wire model.
private RecognizeRequest getRecognizeRequestFromDtmfConfiguration(CallMediaRecognizeOptions recognizeOptions) {
CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions;
DtmfOptionsInternal dtmfOptionsInternal = getDtmfOptionsInternal(
dtmfRecognizeOptions.getInterToneTimeout(),
dtmfRecognizeOptions.getMaxTonesToCollect(),
dtmfRecognizeOptions.getStopTones()
);
RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal()
.setDtmfOptions(dtmfOptionsInternal)
.setInterruptPrompt(recognizeOptions.isInterruptPrompt())
.setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant()));
// The wire model takes whole seconds; sub-second precision of the Duration is dropped.
recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds());
PlaySourceInternal playSourceInternal = getPlaySourceInternalFromRecognizeOptions(recognizeOptions);
RecognizeRequest recognizeRequest = new RecognizeRequest()
.setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString()))
.setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation())
.setPlayPrompt(playSourceInternal)
.setRecognizeOptions(recognizeOptionsInternal)
.setOperationContext(recognizeOptions.getOperationContext())
.setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
return recognizeRequest;
}
// Translates choice-based recognize options into the generated RecognizeRequest wire model.
// Idiom fix: the nested `if (x != null) { if (!x.isEmpty()) ... }` checks are collapsed into
// single compound conditions, and repeated getter calls are hoisted into locals.
private RecognizeRequest getRecognizeRequestFromChoiceConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions;
    RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal()
        .setChoices(convertListRecognitionChoiceInternal(choiceRecognizeOptions.getChoices()))
        .setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant()));
    // The wire model takes whole seconds; sub-second precision of the Duration is dropped.
    recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds());
    // Optional speech settings are forwarded only when present and non-empty.
    String speechLanguage = choiceRecognizeOptions.getSpeechLanguage();
    if (speechLanguage != null && !speechLanguage.isEmpty()) {
        recognizeOptionsInternal.setSpeechLanguage(speechLanguage);
    }
    String speechRecognitionModelEndpointId = choiceRecognizeOptions.getSpeechRecognitionModelEndpointId();
    if (speechRecognitionModelEndpointId != null && !speechRecognitionModelEndpointId.isEmpty()) {
        recognizeOptionsInternal.setSpeechRecognitionModelEndpointId(speechRecognitionModelEndpointId);
    }
    PlaySourceInternal playSourceInternal = getPlaySourceInternalFromRecognizeOptions(recognizeOptions);
    return new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(playSourceInternal)
        .setRecognizeOptions(recognizeOptionsInternal)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
}
// Translates speech-based recognize options into the generated RecognizeRequest wire model.
// Idiom fix: nested null/empty checks collapsed; repeated getter calls hoisted into locals.
private RecognizeRequest getRecognizeRequestFromSpeechConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    CallMediaRecognizeSpeechOptions speechRecognizeOptions = (CallMediaRecognizeSpeechOptions) recognizeOptions;
    SpeechOptionsInternal speechOptionsInternal = new SpeechOptionsInternal().setEndSilenceTimeoutInMs(speechRecognizeOptions.getEndSilenceTimeout().toMillis());
    RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal()
        .setSpeechOptions(speechOptionsInternal)
        .setInterruptPrompt(speechRecognizeOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(speechRecognizeOptions.getTargetParticipant()));
    // The wire model takes whole seconds; sub-second precision of the Duration is dropped.
    recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) speechRecognizeOptions.getInitialSilenceTimeout().getSeconds());
    // Optional speech settings are forwarded only when present and non-empty.
    String speechLanguage = speechRecognizeOptions.getSpeechLanguage();
    if (speechLanguage != null && !speechLanguage.isEmpty()) {
        recognizeOptionsInternal.setSpeechLanguage(speechLanguage);
    }
    String speechRecognitionModelEndpointId = speechRecognizeOptions.getSpeechRecognitionModelEndpointId();
    if (speechRecognitionModelEndpointId != null && !speechRecognitionModelEndpointId.isEmpty()) {
        recognizeOptionsInternal.setSpeechRecognitionModelEndpointId(speechRecognitionModelEndpointId);
    }
    PlaySourceInternal playSourceInternal = getPlaySourceInternalFromRecognizeOptions(recognizeOptions);
    return new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(speechRecognizeOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(speechRecognizeOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(playSourceInternal)
        .setRecognizeOptions(recognizeOptionsInternal)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
}
// Translates combined speech-or-DTMF recognize options into the generated RecognizeRequest.
// Idiom fix: nested null/empty checks collapsed; repeated getter calls hoisted into locals.
private RecognizeRequest getRecognizeRequestFromSpeechOrDtmfConfiguration(CallMediaRecognizeOptions recognizeOptions) {
    CallMediaRecognizeSpeechOrDtmfOptions speechOrDtmfRecognizeOptions = (CallMediaRecognizeSpeechOrDtmfOptions) recognizeOptions;
    DtmfOptionsInternal dtmfOptionsInternal = getDtmfOptionsInternal(
        speechOrDtmfRecognizeOptions.getInterToneTimeout(),
        speechOrDtmfRecognizeOptions.getMaxTonesToCollect(),
        speechOrDtmfRecognizeOptions.getStopTones()
    );
    SpeechOptionsInternal speechOptionsInternal = new SpeechOptionsInternal().setEndSilenceTimeoutInMs(speechOrDtmfRecognizeOptions.getEndSilenceTimeout().toMillis());
    RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal()
        .setSpeechOptions(speechOptionsInternal)
        .setDtmfOptions(dtmfOptionsInternal)
        .setInterruptPrompt(speechOrDtmfRecognizeOptions.isInterruptPrompt())
        .setTargetParticipant(CommunicationIdentifierConverter.convert(speechOrDtmfRecognizeOptions.getTargetParticipant()));
    // The wire model takes whole seconds; sub-second precision of the Duration is dropped.
    recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) speechOrDtmfRecognizeOptions.getInitialSilenceTimeout().getSeconds());
    // Optional speech settings are forwarded only when present and non-empty.
    String speechLanguage = speechOrDtmfRecognizeOptions.getSpeechLanguage();
    if (speechLanguage != null && !speechLanguage.isEmpty()) {
        recognizeOptionsInternal.setSpeechLanguage(speechLanguage);
    }
    String speechRecognitionModelEndpointId = speechOrDtmfRecognizeOptions.getSpeechRecognitionModelEndpointId();
    if (speechRecognitionModelEndpointId != null && !speechRecognitionModelEndpointId.isEmpty()) {
        recognizeOptionsInternal.setSpeechRecognitionModelEndpointId(speechRecognitionModelEndpointId);
    }
    PlaySourceInternal playSourceInternal = getPlaySourceInternalFromRecognizeOptions(recognizeOptions);
    return new RecognizeRequest()
        .setRecognizeInputType(RecognizeInputTypeInternal.fromString(speechOrDtmfRecognizeOptions.getRecognizeInputType().toString()))
        .setInterruptCallMediaOperation(speechOrDtmfRecognizeOptions.isInterruptCallMediaOperation())
        .setPlayPrompt(playSourceInternal)
        .setRecognizeOptions(recognizeOptionsInternal)
        .setOperationContext(recognizeOptions.getOperationContext())
        .setOperationCallbackUri(recognizeOptions.getOperationCallbackUrl());
}
// Maps the public DTMF recognize settings onto the generated DtmfOptionsInternal model.
// Optional fields (max tone count, stop tones) are set only when supplied by the caller.
private DtmfOptionsInternal getDtmfOptionsInternal(Duration interToneTimeout, Integer maxTonesToCollect, List<DtmfTone> stopTones) {
    DtmfOptionsInternal internalOptions = new DtmfOptionsInternal();
    // Wire model takes whole seconds; sub-second precision is dropped.
    internalOptions.setInterToneTimeoutInSeconds((int) interToneTimeout.getSeconds());
    if (maxTonesToCollect != null) {
        internalOptions.setMaxTonesToCollect(maxTonesToCollect);
    }
    if (stopTones != null) {
        List<DtmfToneInternal> convertedStopTones = stopTones.stream()
            .map(this::convertDtmfToneInternal)
            .collect(Collectors.toList());
        internalOptions.setStopTones(convertedStopTones);
    }
    return internalOptions;
}
// Converts the optional play prompt from the recognize options, or returns null when no
// prompt was configured.
private PlaySourceInternal getPlaySourceInternalFromRecognizeOptions(CallMediaRecognizeOptions recognizeOptions) {
    PlaySource prompt = recognizeOptions.getPlayPrompt();
    return prompt == null ? null : convertPlaySourceToPlaySourceInternal(prompt);
}
/**
 * Send DTMF tones
 *
 * @param tones tones to be sent
 * @param targetParticipant the target participant
 * @return Response for successful sendDtmfTones request.
 */
// NOTE(review): unlike most public operations here this lacks @ServiceMethod — consider adding it.
public Mono<SendDtmfTonesResult> sendDtmfTones(List<DtmfTone> tones, CommunicationIdentifier targetParticipant) {
return sendDtmfTonesWithResponse(new SendDtmfTonesOptions(tones, targetParticipant)).flatMap(FluxUtil::toMono);
}
/**
 * Send DTMF tones
 *
 * @param options SendDtmfTones configuration options
 * @return Response for successful sendDtmfTones request.
 */
public Mono<Response<SendDtmfTonesResult>> sendDtmfTonesWithResponse(SendDtmfTonesOptions options) {
return withContext(context -> sendDtmfTonesWithResponseInternal(options, context));
}
// Builds the SendDtmfTones request and wraps the internal response in the public result type.
Mono<Response<SendDtmfTonesResult>> sendDtmfTonesWithResponseInternal(SendDtmfTonesOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
SendDtmfTonesRequestInternal requestInternal = new SendDtmfTonesRequestInternal()
.setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
.setTones(options.getTones().stream()
.map(this::convertDtmfToneInternal)
.collect(Collectors.toList()))
.setOperationContext(options.getOperationContext())
.setOperationCallbackUri(options.getOperationCallbackUrl());
// The extra UUID/timestamp arguments are presumably repeatability headers for the
// generated client — TODO confirm against the generated CallMediasImpl signature.
return contentsInternal.sendDtmfTonesWithResponseAsync(
callConnectionId,
requestInternal,
UUID.randomUUID(),
OffsetDateTime.now(),
context
).map(response -> new SimpleResponse<>(response, SendDtmfTonesResponseConstructorProxy.create(response.getValue())));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
/**
 * Starts continuous Dtmf recognition.
 * @param targetParticipant the target participant
 * @return void
 */
public Mono<Void> startContinuousDtmfRecognition(CommunicationIdentifier targetParticipant) {
return startContinuousDtmfRecognitionWithResponse(new ContinuousDtmfRecognitionOptions(targetParticipant)).then();
}
/**
 * Starts continuous Dtmf recognition.
 * @param options ContinuousDtmfRecognition configuration options
 * @return Response for successful start continuous dtmf recognition request.
 */
public Mono<Response<Void>> startContinuousDtmfRecognitionWithResponse(ContinuousDtmfRecognitionOptions options) {
return withContext(context -> startContinuousDtmfRecognitionWithResponseInternal(options, context));
}
// Builds the start-continuous-DTMF request; note the start path does not forward an
// operation callback URL, while the stop path below does.
Mono<Response<Void>> startContinuousDtmfRecognitionWithResponseInternal(ContinuousDtmfRecognitionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
ContinuousDtmfRecognitionRequestInternal requestInternal = new ContinuousDtmfRecognitionRequestInternal()
.setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
.setOperationContext(options.getOperationContext());
return contentsInternal.startContinuousDtmfRecognitionWithResponseAsync(callConnectionId, requestInternal, context);
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
/**
 * Stops continuous Dtmf recognition.
 * @param targetParticipant the target participant
 * @return void
 */
public Mono<Void> stopContinuousDtmfRecognition(CommunicationIdentifier targetParticipant) {
return stopContinuousDtmfRecognitionWithResponse(new ContinuousDtmfRecognitionOptions(targetParticipant)).then();
}
/**
 * Stops continuous Dtmf recognition.
 * @param options ContinuousDtmfRecognition configuration options
 * @return Response for successful stop continuous dtmf recognition request.
 */
public Mono<Response<Void>> stopContinuousDtmfRecognitionWithResponse(ContinuousDtmfRecognitionOptions options) {
return withContext(context -> stopContinuousDtmfRecognitionWithResponseInternal(options, context));
}
// Builds the stop-continuous-DTMF request, including the optional operation callback URL.
Mono<Response<Void>> stopContinuousDtmfRecognitionWithResponseInternal(ContinuousDtmfRecognitionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
ContinuousDtmfRecognitionRequestInternal requestInternal = new ContinuousDtmfRecognitionRequestInternal()
.setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
.setOperationContext(options.getOperationContext())
.setOperationCallbackUri(options.getOperationCallbackUrl());
return contentsInternal.stopContinuousDtmfRecognitionWithResponseAsync(callConnectionId, requestInternal, context);
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
/**
 * Puts a participant on hold and plays the supplied audio to them.
 * @param targetParticipant the participant to place on hold.
 * @param playSourceInfo audio to play while the participant is on hold.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startHoldMusic(CommunicationIdentifier targetParticipant,
PlaySource playSourceInfo) {
return startHoldMusicWithResponseInternal(
new StartHoldMusicOptions(targetParticipant, playSourceInfo),
Context.NONE).then();
}
/**
 * Puts a participant on hold, exposing the full HTTP response.
 * @param options - Different options to pass to the request.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startHoldMusicWithResponse(StartHoldMusicOptions options) {
return withContext(context -> startHoldMusicWithResponseInternal(
options, context));
}
// Builds the StartHoldMusic request from the options and dispatches it on the generated media client.
Mono<Response<Void>> startHoldMusicWithResponseInternal(StartHoldMusicOptions options, Context context) {
try {
// Normalize a null context so downstream pipeline policies always receive a usable Context.
context = context == null ? Context.NONE : context;
StartHoldMusicRequestInternal request = new StartHoldMusicRequestInternal()
.setTargetParticipant(CommunicationIdentifierConverter.convert(options.getTargetParticipant()))
.setPlaySourceInfo(convertPlaySourceToPlaySourceInternal(options.getPlaySourceInfo()))
.setLoop(options.isLoop())
.setOperationContext(options.getOperationContext());
return contentsInternal
.startHoldMusicWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
 * Removes hold from a participant in the call.
 * @param targetParticipant the participant to release from hold.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> stopHoldMusic(CommunicationIdentifier targetParticipant) {
return stopHoldMusicWithResponse(targetParticipant, null).then();
}
/**
 * Removes hold from a participant, exposing the full HTTP response.
 * @param targetParticipant the participant to release from hold.
 * @param operationContext Operational context.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> stopHoldMusicWithResponse(CommunicationIdentifier targetParticipant,
String operationContext) {
return withContext(context -> stopHoldMusicWithResponseInternal(targetParticipant, operationContext, context));
}
// Builds the StopHoldMusic request and dispatches it on the generated media client.
Mono<Response<Void>> stopHoldMusicWithResponseInternal(CommunicationIdentifier targetParticipant,
String operationContext,
Context context) {
try {
context = context == null ? Context.NONE : context;
StopHoldMusicRequestInternal request = new StopHoldMusicRequestInternal()
.setTargetParticipant(CommunicationIdentifierConverter.convert(targetParticipant))
.setOperationContext(operationContext);
return contentsInternal
.stopHoldMusicWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
 * Starts transcription in the call with default settings.
 *
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> startTranscription() {
return startTranscriptionWithResponseAsync(null).then();
}
/**
 * Starts transcription in the call with options.
 *
 * @param options Options for the Start Transcription operation; may be null for defaults.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> startTranscriptionWithResponseAsync(StartTranscriptionOptions options) {
return withContext(context -> startTranscriptionWithResponseInternal(options, context));
}
// Builds the StartTranscription request; a null options argument sends an empty request body.
Mono<Response<Void>> startTranscriptionWithResponseInternal(StartTranscriptionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
StartTranscriptionRequestInternal request = new StartTranscriptionRequestInternal();
if (options != null) {
request.setLocale(options.getLocale());
request.setOperationContext(options.getOperationContext());
}
return contentsInternal
.startTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
 * Stops transcription in the call.
 *
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> stopTranscription() {
return stopTranscriptionWithResponseAsync(null).then();
}
/**
 * Stops transcription in the call with options.
 *
 * @param options Options for the Stop Transcription operation; may be null.
 * @return Response for successful operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> stopTranscriptionWithResponseAsync(StopTranscriptionOptions options) {
return withContext(context -> stopTranscriptionWithResponseInternal(options, context));
}
Mono<Response<Void>> stopTranscriptionWithResponseInternal(StopTranscriptionOptions options, Context context) {
try {
context = context == null ? Context.NONE : context;
StopTranscriptionRequestInternal request = new StopTranscriptionRequestInternal();
if (options != null) {
request.setOperationContext(options.getOperationContext());
}
return contentsInternal
.stopTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Updates transcription language
*
* @param locale Defines new locale for transcription.
* @return Response for successful operation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> updateTranscription(String locale) {
return withContext(context -> updateTranscriptionWithResponseInternal(locale, context)).then();
}
Mono<Response<Void>> updateTranscriptionWithResponseInternal(String locale, Context context) {
try {
context = context == null ? Context.NONE : context;
UpdateTranscriptionRequestInternal request = new UpdateTranscriptionRequestInternal();
request.setLocale(locale);
return contentsInternal
.updateTranscriptionWithResponseAsync(callConnectionId, request, context);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
} |
nit: pretty :) | static void validateHealthcareEntity(HealthcareEntity expected, HealthcareEntity actual) {
assertEquals(expected.getCategory(), actual.getCategory());
assertNotNull(actual.getText());
if (expected.getNormalizedText() != null) {
assertNotNull(actual.getNormalizedText());
}
if (expected.getSubcategory() != null) {
assertNotNull(actual.getSubcategory());
}
validateEntityAssertion(expected.getAssertion(), actual.getAssertion());
validateEntityDataSourceList(expected.getDataSources(), actual.getDataSources());
} | static void validateHealthcareEntity(HealthcareEntity expected, HealthcareEntity actual) {
assertEquals(expected.getCategory(), actual.getCategory());
assertNotNull(actual.getText());
if (expected.getNormalizedText() != null) {
assertNotNull(actual.getNormalizedText());
}
if (expected.getSubcategory() != null) {
assertNotNull(actual.getSubcategory());
}
validateEntityAssertion(expected.getAssertion(), actual.getAssertion());
validateEntityDataSourceList(expected.getDataSources(), actual.getDataSources());
} | class TextAnalyticsClientTestBase extends TestProxyTestBase {
static final String BATCH_ERROR_EXCEPTION_MESSAGE = "Error in accessing the property on document id: 2, when %s returned with an error: Document text is empty. ErrorCodeValue: {InvalidDocument}";
static final String INVALID_DOCUMENT_BATCH_NPE_MESSAGE = "'documents' cannot be null.";
static final String INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE = "'documents' cannot be empty.";
static final String INVALID_DOCUMENT_NPE_MESSAGE = "'document' cannot be null.";
static final String REDACTED = "REDACTED";
static final String AZURE_TEXT_ANALYTICS_ENDPOINT =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_ENDPOINT =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_ENDPOINT");
static final String AZURE_TEXT_ANALYTICS_API_KEY =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_API_KEY");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_API_KEY =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_API_KEY");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME");
static final String AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME =
Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME");
static InterceptorManager interceptorManagerTestBase;
Duration durationTestMode;
/**
* Use duration of nearly zero value for PLAYBACK test mode, otherwise, use default duration value for LIVE mode.
*/
@Override
protected void beforeTest() {
if (interceptorManager.isPlaybackMode()) {
durationTestMode = Duration.ofMillis(1);
} else {
durationTestMode = DEFAULT_POLL_INTERVAL;
}
interceptorManagerTestBase = interceptorManager;
}
protected <T, U> SyncPoller<T, U> setPollInterval(SyncPoller<T, U> syncPoller) {
return syncPoller.setPollInterval(durationTestMode);
}
@Test
abstract void detectLanguagesBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchListCountryHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesBatchInputSingleError(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForBatchStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForListLanguageHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesForListWithOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForDomainFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForTextInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForBatchStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForListLanguageHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesEmoji(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeActionsWithMultiSameKindActions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeCustomEntitiesAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void singleLabelClassificationAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void multiCategoryClassifyAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void singleLabelClassificationStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void multiLabelClassificationStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(
HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeExtractSummaryActionWithNonDefaultSentenceCount(
HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(
HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
void detectLanguageShowStatisticsRunner(BiConsumer<List<DetectLanguageInput>,
TextAnalyticsRequestOptions> testRunner) {
final List<DetectLanguageInput> detectLanguageInputs = TestUtils.getDetectLanguageInputs();
TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
testRunner.accept(detectLanguageInputs, options);
}
void detectLanguageDuplicateIdRunner(BiConsumer<List<DetectLanguageInput>,
TextAnalyticsRequestOptions> testRunner) {
testRunner.accept(TestUtils.getDuplicateIdDetectLanguageInputs(), null);
}
void detectLanguagesCountryHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(DETECT_LANGUAGE_INPUTS, "US");
}
void detectLanguagesBatchListCountryHintWithOptionsRunner(BiConsumer<List<String>,
TextAnalyticsRequestOptions> testRunner) {
TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
testRunner.accept(DETECT_LANGUAGE_INPUTS, options);
}
void detectLanguageStringInputRunner(Consumer<List<String>> testRunner) {
testRunner.accept(DETECT_LANGUAGE_INPUTS);
}
void detectLanguageRunner(Consumer<List<DetectLanguageInput>> testRunner) {
testRunner.accept(TestUtils.getDetectLanguageInputs());
}
void detectSingleTextLanguageRunner(Consumer<String> testRunner) {
testRunner.accept(DETECT_LANGUAGE_INPUTS.get(0));
}
void detectLanguageInvalidCountryHintRunner(BiConsumer<String, String> testRunner) {
testRunner.accept(DETECT_LANGUAGE_INPUTS.get(1), "en");
}
void detectLanguageEmptyCountryHintRunner(BiConsumer<String, String> testRunner) {
testRunner.accept(DETECT_LANGUAGE_INPUTS.get(1), "");
}
void detectLanguageNoneCountryHintRunner(BiConsumer<String, String> testRunner) {
testRunner.accept(DETECT_LANGUAGE_INPUTS.get(1), "none");
}
void recognizeCategorizedEntitiesForSingleTextInputRunner(Consumer<String> testRunner) {
testRunner.accept(CATEGORIZED_ENTITY_INPUTS.get(0));
}
void recognizeCategorizedEntityStringInputRunner(Consumer<List<String>> testRunner) {
testRunner.accept(CATEGORIZED_ENTITY_INPUTS);
}
void recognizeCategorizedEntitiesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(CATEGORIZED_ENTITY_INPUTS, "en");
}
void recognizeBatchCategorizedEntitySingleErrorRunner(Consumer<List<TextDocumentInput>> testRunner) {
List<TextDocumentInput> inputs = Collections.singletonList(new TextDocumentInput("2", " "));
testRunner.accept(inputs);
}
void recognizeBatchCategorizedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(TestUtils.getTextDocumentInputs(CATEGORIZED_ENTITY_INPUTS));
}
void recognizeBatchCategorizedEntitiesShowStatsRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(CATEGORIZED_ENTITY_INPUTS);
TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
testRunner.accept(textDocumentInputs, options);
}
// Runs the string-overload categorized-entity test with document statistics requested.
void recognizeStringBatchCategorizedEntitiesShowStatsRunner(
BiConsumer<List<String>, TextAnalyticsRequestOptions> testRunner) {
testRunner.accept(CATEGORIZED_ENTITY_INPUTS, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
}
// Supplies a single PII document to the test body.
void recognizePiiSingleDocumentRunner(Consumer<String> testRunner) {
testRunner.accept(PII_ENTITY_INPUTS.get(0));
}
// Supplies a PII document together with the protected-health-information domain filter.
void recognizePiiDomainFilterRunner(BiConsumer<String, RecognizePiiEntitiesOptions> testRunner) {
testRunner.accept(PII_ENTITY_INPUTS.get(0),
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
}
// Supplies the PII documents with an explicit "en" language hint.
void recognizePiiLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(PII_ENTITY_INPUTS, "en");
}
// Same inputs as recognizePiiLanguageHintRunner; kept separate so each overload has its own runner.
void recognizePiiEntitiesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(PII_ENTITY_INPUTS, "en");
}
// Supplies a single whitespace-only document ("2", " ") to exercise a per-document error result.
void recognizeBatchPiiEntitySingleErrorRunner(Consumer<List<TextDocumentInput>> testRunner) {
List<TextDocumentInput> inputs = Collections.singletonList(new TextDocumentInput("2", " "));
testRunner.accept(inputs);
}
// Supplies the PII fixtures wrapped as TextDocumentInput batch items.
void recognizeBatchPiiEntitiesRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(TestUtils.getTextDocumentInputs(PII_ENTITY_INPUTS));
}
// Batch PII run with statistics requested on the options object.
void recognizeBatchPiiEntitiesShowStatsRunner(
BiConsumer<List<TextDocumentInput>, RecognizePiiEntitiesOptions> testRunner) {
final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(PII_ENTITY_INPUTS);
RecognizePiiEntitiesOptions options = new RecognizePiiEntitiesOptions().setIncludeStatistics(true);
testRunner.accept(textDocumentInputs, options);
}
// String-overload PII run with statistics requested.
void recognizeStringBatchPiiEntitiesShowStatsRunner(
BiConsumer<List<String>, RecognizePiiEntitiesOptions> testRunner) {
testRunner.accept(PII_ENTITY_INPUTS, new RecognizePiiEntitiesOptions().setIncludeStatistics(true));
}
// PII run restricted to SSN and ABA routing-number categories via the categories filter.
void recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
BiConsumer<List<String>, RecognizePiiEntitiesOptions> testRunner) {
testRunner.accept(PII_ENTITY_INPUTS,
new RecognizePiiEntitiesOptions().setCategoriesFilter(
PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER, PiiEntityCategory.ABA_ROUTING_NUMBER));
}
// Supplies a single linked-entity document to the test body.
void recognizeLinkedEntitiesForSingleTextInputRunner(Consumer<String> testRunner) {
testRunner.accept(LINKED_ENTITY_INPUTS.get(0));
}
// String-overload linked-entity run with statistics requested.
void recognizeBatchStringLinkedEntitiesShowStatsRunner(
BiConsumer<List<String>, TextAnalyticsRequestOptions> testRunner) {
testRunner.accept(LINKED_ENTITY_INPUTS, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
}
// Batch linked-entity run (TextDocumentInput overload) with statistics requested.
void recognizeBatchLinkedEntitiesShowStatsRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
testRunner.accept(TestUtils.getTextDocumentInputs(LINKED_ENTITY_INPUTS), options);
}
// Linked-entity run with an explicit "en" language hint.
void recognizeLinkedLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(LINKED_ENTITY_INPUTS, "en");
}
// Supplies the raw linked-entity strings.
void recognizeLinkedStringInputRunner(Consumer<List<String>> testRunner) {
testRunner.accept(LINKED_ENTITY_INPUTS);
}
// Supplies the linked-entity fixtures wrapped as TextDocumentInput batch items.
void recognizeBatchLinkedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(TestUtils.getTextDocumentInputs(LINKED_ENTITY_INPUTS));
}
// Supplies the second key-phrase fixture (index 1) as a single document.
void extractKeyPhrasesForSingleTextInputRunner(Consumer<String> testRunner) {
testRunner.accept(KEY_PHRASE_INPUTS.get(1));
}
// String-overload key-phrase run with statistics requested.
void extractBatchStringKeyPhrasesShowStatsRunner(BiConsumer<List<String>, TextAnalyticsRequestOptions> testRunner) {
testRunner.accept(KEY_PHRASE_INPUTS, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
}
// Batch key-phrase run (TextDocumentInput overload) with statistics requested.
void extractBatchKeyPhrasesShowStatsRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(KEY_PHRASE_INPUTS);
TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
testRunner.accept(textDocumentInputs, options);
}
// Key-phrase run with an explicit "en" language hint.
void extractKeyPhrasesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(KEY_PHRASE_INPUTS, "en");
}
// Supplies the raw key-phrase strings.
void extractKeyPhrasesStringInputRunner(Consumer<List<String>> testRunner) {
testRunner.accept(KEY_PHRASE_INPUTS);
}
// Supplies the key-phrase fixtures wrapped as TextDocumentInput batch items.
void extractBatchKeyPhrasesRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(TestUtils.getTextDocumentInputs(KEY_PHRASE_INPUTS));
}
// Supplies a batch containing duplicate document IDs to exercise input validation.
void duplicateIdRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(getDuplicateTextDocumentInputs());
}
// Supplies a single sentiment document.
void analyzeSentimentForSingleTextInputRunner(Consumer<String> testRunner) {
testRunner.accept(SENTIMENT_INPUTS.get(0));
}
// Single-document sentiment run with opinion mining enabled.
void analyzeSentimentForTextInputWithOpinionMiningRunner(BiConsumer<String, AnalyzeSentimentOptions> testRunner) {
testRunner.accept(SENTIMENT_INPUTS.get(0), new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
}
// Sentiment run with an explicit "en" language hint.
void analyzeSentimentLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
testRunner.accept(SENTIMENT_INPUTS, "en");
}
// Supplies the raw sentiment strings.
void analyzeSentimentStringInputRunner(Consumer<List<String>> testRunner) {
testRunner.accept(SENTIMENT_INPUTS);
}
// Supplies the sentiment fixtures wrapped as TextDocumentInput batch items.
void analyzeBatchSentimentRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(TestUtils.getTextDocumentInputs(SENTIMENT_INPUTS));
}
// String-overload sentiment run with both statistics and opinion mining enabled.
void analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner(BiConsumer<List<String>, AnalyzeSentimentOptions> testRunner) {
testRunner.accept(SENTIMENT_INPUTS,
new AnalyzeSentimentOptions().setIncludeStatistics(true).setIncludeOpinionMining(true));
}
// Batch sentiment run with statistics requested.
void analyzeBatchSentimentShowStatsRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(SENTIMENT_INPUTS);
testRunner.accept(textDocumentInputs, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
}
// Batch sentiment run with opinion mining and statistics both enabled.
void analyzeBatchSentimentOpinionMining(BiConsumer<List<TextDocumentInput>, AnalyzeSentimentOptions> testRunner) {
final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(SENTIMENT_INPUTS);
testRunner.accept(textDocumentInputs, new AnalyzeSentimentOptions().setIncludeOpinionMining(true)
.setIncludeStatistics(true));
}
// Supplies an empty string to exercise empty-document handling.
void emptyTextRunner(Consumer<String> testRunner) {
testRunner.accept("");
}
// Supplies an empty batch plus the exact error message the service/client is expected to raise.
void emptyListRunner(BiConsumer<List<TextDocumentInput>, String> testRunner) {
testRunner.accept(new ArrayList<>(), "'documents' cannot be empty.");
}
// Supplies a detect-language input whose document ID is empty.
void detectLanguageInputEmptyIdRunner(Consumer<List<DetectLanguageInput>> testRunner) {
testRunner.accept(asList(new DetectLanguageInput("", DETECT_LANGUAGE_INPUTS.get(0))));
}
// Supplies a text document input whose document ID is empty.
void emptyDocumentIdRunner(Consumer<List<TextDocumentInput>> testRunner) {
testRunner.accept(asList(new TextDocumentInput("", CATEGORIZED_ENTITY_INPUTS.get(0))));
}
/**
 * Feeds the test body twelve copies of the same document — presumably more than the
 * service's per-request document limit (TODO confirm the exact cap) — so over-limit
 * error handling can be asserted.
 */
void tooManyDocumentsRunner(Consumer<List<String>> testRunner) {
final String documentInput = CATEGORIZED_ENTITY_INPUTS.get(0);
// Equivalent to listing the document twelve times explicitly.
testRunner.accept(new ArrayList<>(Collections.nCopies(12, documentInput)));
}
// The runners below prepend specific Unicode sequences to the document so string-index/offset
// handling can be verified. The literals are the test payload: do NOT re-encode, normalize,
// or "clean up" these strings — NFC/NFD variants are visually identical but differ in code points.
// Prefixes a surrogate-pair emoji.
void emojiRunner(Consumer<String> testRunner, String text) {
testRunner.accept("👩 " + text);
}
// Batch variant of the emoji prefix, wrapped as a single TextDocumentInput.
void batchEmojiRunner(Consumer<List<TextDocumentInput>> testRunner, String text) {
testRunner.accept(Collections.singletonList(new TextDocumentInput("0", "👩 " + text)));
}
// Prefixes an emoji carrying a skin-tone modifier (extra code point).
void emojiWithSkinToneModifierRunner(Consumer<String> testRunner, String text) {
testRunner.accept("👩🏻 " + text);
}
// Prefixes a multi-person family emoji sequence.
void emojiFamilyRunner(Consumer<String> testRunner, String text) {
testRunner.accept("👩👩👧👧 " + text);
}
// Prefixes a family emoji sequence where each member has a skin-tone modifier.
void emojiFamilyWithSkinToneModifierRunner(Consumer<String> testRunner, String text) {
testRunner.accept("👩🏻👩🏽👧🏾👦🏿 " + text);
}
// Prefixes "año" — per the method name, NFC (precomposed) form; TODO confirm encoding.
void diacriticsNfcRunner(Consumer<String> testRunner, String text) {
testRunner.accept("año " + text);
}
// Prefixes "año" — per the method name, NFD (decomposed) form; TODO confirm encoding.
void diacriticsNfdRunner(Consumer<String> testRunner, String text) {
testRunner.accept("año " + text);
}
// Prefixes Korean text — per the method name, NFC form; TODO confirm encoding.
void koreanNfcRunner(Consumer<String> testRunner, String text) {
testRunner.accept("아가 " + text);
}
// Prefixes Korean text — per the method name, NFD form; TODO confirm encoding.
void koreanNfdRunner(Consumer<String> testRunner, String text) {
testRunner.accept("아가 " + text);
}
// Prefixes heavily stacked combining characters ("zalgo" text).
void zalgoTextRunner(Consumer<String> testRunner, String text) {
testRunner.accept("ơ̵̧̧̢̳̘̘͕͔͕̭̟̙͎͈̞͔̈̇̒̃͋̇̅͛̋͛̎́͑̄̐̂̎͗͝m̵͍͉̗̄̏͌̂̑̽̕͝͠g̵̢̡̢̡̨̡̧̛͉̞̯̠̤̣͕̟̫̫̼̰͓̦͖̣̣͎̋͒̈́̓̒̈̍̌̓̅͑̒̓̅̅͒̿̏́͗̀̇͛̏̀̈́̀̊̾̀̔͜͠͝ͅ " + text);
}
// Healthcare-entity runners. "Lro" in the names suggests long-running-operation flows;
// the runners themselves only assemble inputs and options.
// String-overload healthcare run with statistics requested.
void healthcareStringInputRunner(BiConsumer<List<String>, AnalyzeHealthcareEntitiesOptions> testRunner) {
testRunner.accept(HEALTHCARE_INPUTS, new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
}
// Two-document healthcare run with statistics requested.
void healthcareLroRunner(BiConsumer<List<TextDocumentInput>, AnalyzeHealthcareEntitiesOptions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", HEALTHCARE_INPUTS.get(0)),
new TextDocumentInput("1", HEALTHCARE_INPUTS.get(1))),
new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
}
// Builds totalDocuments copies of the first healthcare fixture so the test can page results.
void healthcareLroPaginationRunner(
BiConsumer<List<TextDocumentInput>, AnalyzeHealthcareEntitiesOptions> testRunner, int totalDocuments) {
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < totalDocuments; i++) {
documents.add(new TextDocumentInput(Integer.toString(i), HEALTHCARE_INPUTS.get(0)));
}
testRunner.accept(documents, new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
}
// Single assertion-oriented healthcare document; statistics explicitly disabled.
void analyzeHealthcareEntitiesForAssertionRunner(
BiConsumer<List<String>, AnalyzeHealthcareEntitiesOptions> testRunner) {
testRunner.accept(asList(
"All female participants that are premenopausal will be required to have a pregnancy test; "
+ "any participant who is pregnant or breastfeeding will not be included"),
new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(false));
}
// Ten identical documents so the test has an in-flight operation to cancel.
void cancelHealthcareLroRunner(BiConsumer<List<TextDocumentInput>, AnalyzeHealthcareEntitiesOptions> testRunner) {
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < 10; i++) {
documents.add(new TextDocumentInput(Integer.toString(i), HEALTHCARE_INPUTS.get(0)));
}
testRunner.accept(documents, new AnalyzeHealthcareEntitiesOptions());
}
// String-overload analyze-actions run combining entity, PII, key-phrase, and sentiment actions.
void analyzeActionsStringInputRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
CATEGORIZED_ENTITY_INPUTS.get(0),
PII_ENTITY_INPUTS.get(0)),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizeEntitiesActions(new RecognizeEntitiesAction())
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction())
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction())
.setAnalyzeSentimentActions(new AnalyzeSentimentAction()));
}
// Same action set as above but with TextDocumentInput batch items.
void analyzeBatchActionsRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)),
new TextDocumentInput("1", PII_ENTITY_INPUTS.get(0))),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizeEntitiesActions(new RecognizeEntitiesAction())
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction())
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction())
.setAnalyzeSentimentActions(new AnalyzeSentimentAction())
);
}
// Registers two actions of each kind — one carrying CUSTOM_ACTION_NAME, one unnamed — to
// exercise duplicate-action-kind handling.
void analyzeActionsWithMultiSameKindActionsRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)),
new TextDocumentInput("1", PII_ENTITY_INPUTS.get(0))),
new TextAnalyticsActions()
.setRecognizeEntitiesActions(new RecognizeEntitiesAction().setActionName(CUSTOM_ACTION_NAME),
new RecognizeEntitiesAction())
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction().setActionName(CUSTOM_ACTION_NAME),
new RecognizePiiEntitiesAction())
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction().setActionName(CUSTOM_ACTION_NAME),
new ExtractKeyPhrasesAction())
.setRecognizeLinkedEntitiesActions(
new RecognizeLinkedEntitiesAction().setActionName(CUSTOM_ACTION_NAME),
new RecognizeLinkedEntitiesAction())
.setAnalyzeSentimentActions(new AnalyzeSentimentAction().setActionName(CUSTOM_ACTION_NAME),
new AnalyzeSentimentAction())
);
}
// Every action carries CUSTOM_ACTION_NAME so the test can assert the name round-trips.
void analyzeActionsWithActionNamesRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0))),
new TextAnalyticsActions()
.setRecognizeEntitiesActions(new RecognizeEntitiesAction().setActionName(CUSTOM_ACTION_NAME))
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction().setActionName(CUSTOM_ACTION_NAME))
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction().setActionName(CUSTOM_ACTION_NAME))
.setAnalyzeSentimentActions(new AnalyzeSentimentAction().setActionName(CUSTOM_ACTION_NAME))
);
}
// Builds documentsInTotal copies of the PII fixture so analyze-actions results can be paged.
void analyzeBatchActionsPaginationRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner,
int documentsInTotal) {
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < documentsInTotal; i++) {
documents.add(new TextDocumentInput(Integer.toString(i), PII_ENTITY_INPUTS.get(0)));
}
testRunner.accept(documents,
new TextAnalyticsActions().setDisplayName("Test1")
.setRecognizeEntitiesActions(new RecognizeEntitiesAction())
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction())
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction())
.setRecognizeLinkedEntitiesActions(new RecognizeLinkedEntitiesAction())
.setAnalyzeSentimentActions(new AnalyzeSentimentAction()));
}
// Analyze-actions run containing only the entity-recognition action.
void analyzeEntitiesRecognitionRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)),
new TextDocumentInput("1", PII_ENTITY_INPUTS.get(0))),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizeEntitiesActions(new RecognizeEntitiesAction()));
}
// PII action restricted to SSN and ABA routing-number categories.
void analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", PII_ENTITY_INPUTS.get(0)),
new TextDocumentInput("1", PII_ENTITY_INPUTS.get(1))),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizePiiEntitiesActions(
new RecognizePiiEntitiesAction()
.setCategoriesFilter(PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER,
PiiEntityCategory.ABA_ROUTING_NUMBER)
));
}
// PII action restricted to the protected-health-information domain.
void analyzePiiEntityRecognitionWithDomainFiltersRunner(
BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", PII_ENTITY_INPUTS.get(0)),
new TextDocumentInput("1", PII_ENTITY_INPUTS.get(1))),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizePiiEntitiesActions(
new RecognizePiiEntitiesAction()
.setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)
));
}
// Analyze-actions run containing only the linked-entity action.
void analyzeLinkedEntityRecognitionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(
LINKED_ENTITY_INPUTS,
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizeLinkedEntitiesActions(
new RecognizeLinkedEntitiesAction()));
}
// Analyze-actions run containing only the key-phrase action.
void extractKeyPhrasesRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(CATEGORIZED_ENTITY_INPUTS.get(0), PII_ENTITY_INPUTS.get(0)),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setExtractKeyPhrasesActions(
new ExtractKeyPhrasesAction()));
}
// Analyze-actions run containing only the sentiment action.
void analyzeSentimentRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(
SENTIMENT_INPUTS,
new TextAnalyticsActions()
.setAnalyzeSentimentActions(new AnalyzeSentimentAction()));
}
// Analyze-actions run containing only the healthcare action.
void analyzeHealthcareEntitiesRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(
HEALTHCARE_INPUTS,
new TextAnalyticsActions()
.setAnalyzeHealthcareEntitiesActions(
new AnalyzeHealthcareEntitiesAction()));
}
// Custom entity-recognition action targeting the preconfigured project/deployment pair.
void recognizeCustomEntitiesActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(CUSTOM_ENTITIES_INPUT,
new TextAnalyticsActions()
.setRecognizeCustomEntitiesActions(
new RecognizeCustomEntitiesAction(AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME,
AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME)));
}
// Custom single-label classification action against the preconfigured project/deployment.
void classifyCustomSingleCategoryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(CUSTOM_SINGLE_CLASSIFICATION,
new TextAnalyticsActions()
.setSingleLabelClassifyActions(
new SingleLabelClassifyAction(AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME,
AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME)));
}
// Custom multi-label classification action against the preconfigured project/deployment.
void classifyCustomMultiCategoryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(CUSTOM_MULTI_CLASSIFICATION,
new TextAnalyticsActions()
.setMultiLabelClassifyActions(
new MultiLabelClassifyAction(AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME,
AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME)));
}
// Non-action overload: passes documents plus [projectName, deploymentName] as a list.
void recognizeCustomEntitiesRunner(BiConsumer<List<String>, List<String>> testRunner) {
testRunner.accept(CUSTOM_ENTITIES_INPUT,
asList(AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME,
AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME));
}
// Non-action overload for single-label classification: [projectName, deploymentName].
void classifyCustomSingleLabelRunner(BiConsumer<List<String>, List<String>> testRunner) {
testRunner.accept(CUSTOM_SINGLE_CLASSIFICATION,
asList(AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME,
AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME));
}
// Non-action overload for multi-label classification: [projectName, deploymentName].
void classifyCustomMultiLabelRunner(BiConsumer<List<String>, List<String>> testRunner) {
testRunner.accept(CUSTOM_MULTI_CLASSIFICATION,
asList(AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME,
AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME));
}
// Extractive-summary action with caller-provided sentence cap and ordering.
void extractiveSummaryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner,
Integer maxSentenceCount, ExtractiveSummarySentencesOrder extractiveSummarySentencesOrder) {
testRunner.accept(SUMMARY_INPUTS,
new TextAnalyticsActions()
.setExtractiveSummaryActions(
new ExtractiveSummaryAction()
.setMaxSentenceCount(maxSentenceCount)
.setOrderBy(extractiveSummarySentencesOrder)));
}
// Options-based extractive summary (string overload).
void extractiveSummaryRunner(BiConsumer<List<String>, ExtractiveSummaryOptions> testRunner,
Integer maxSentenceCount, ExtractiveSummarySentencesOrder extractiveSummarySentencesOrder) {
testRunner.accept(SUMMARY_INPUTS,
new ExtractiveSummaryOptions()
.setMaxSentenceCount(maxSentenceCount)
.setOrderBy(extractiveSummarySentencesOrder));
}
// Options-based extractive summary (TextDocumentInput overload).
void extractiveSummaryMaxOverloadRunner(BiConsumer<List<TextDocumentInput>, ExtractiveSummaryOptions> testRunner,
Integer maxSentenceCount, ExtractiveSummarySentencesOrder extractiveSummarySentencesOrder) {
testRunner.accept(TestUtils.getTextDocumentInputs(SUMMARY_INPUTS),
new ExtractiveSummaryOptions()
.setMaxSentenceCount(maxSentenceCount)
.setOrderBy(extractiveSummarySentencesOrder));
}
// Abstractive-summary action with caller-provided sentence count.
void abstractiveSummaryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner,
Integer sentenceCount) {
testRunner.accept(SUMMARY_INPUTS,
new TextAnalyticsActions()
.setAbstractiveSummaryActions(
new AbstractiveSummaryAction().setSentenceCount(sentenceCount)));
}
// Options-based abstractive summary (string overload).
void abstractiveSummaryRunner(BiConsumer<List<String>, AbstractiveSummaryOptions> testRunner,
Integer sentenceCount) {
testRunner.accept(SUMMARY_INPUTS, new AbstractiveSummaryOptions().setSentenceCount(sentenceCount));
}
// Options-based abstractive summary (TextDocumentInput overload).
void abstractiveSummaryMaxOverloadRunner(BiConsumer<List<TextDocumentInput>, AbstractiveSummaryOptions> testRunner,
Integer sentenceCount) {
testRunner.accept(TestUtils.getTextDocumentInputs(SUMMARY_INPUTS),
new AbstractiveSummaryOptions().setSentenceCount(sentenceCount));
}
String getEndpoint(boolean isStaticResource) {
return interceptorManager.isPlaybackMode() ? "https:
: isStaticResource ? AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_ENDPOINT : AZURE_TEXT_ANALYTICS_ENDPOINT;
}
// Resolves the API key: a fake key in playback mode, otherwise the live key for the chosen resource.
String getApiKey(boolean isStaticSource) {
return interceptorManager.isPlaybackMode() ? FAKE_API_KEY
: isStaticSource ? AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_API_KEY : AZURE_TEXT_ANALYTICS_API_KEY;
}
// Builds a client builder wired to the resolved endpoint/key, the given HTTP client and
// service version; the record policy is attached only when actually recording.
TextAnalyticsClientBuilder getTextAnalyticsClientBuilder(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
TextAnalyticsClientBuilder builder = new TextAnalyticsClientBuilder()
.endpoint(getEndpoint(isStaticResource))
.credential(new AzureKeyCredential(getApiKey(isStaticResource)))
.httpClient(httpClient)
.serviceVersion(serviceVersion);
if (interceptorManager.isRecordMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
// Each *WithResponse validator checks the HTTP status code then defers to the plain
// collection validator; each collection validator defers per-document comparison to
// validateTextAnalyticsResult with a type-specific callback.
static void validateDetectLanguageResultCollectionWithResponse(boolean showStatistics,
DetectLanguageResultCollection expected, int expectedStatusCode,
Response<DetectLanguageResultCollection> response) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
validateDetectLanguageResultCollection(showStatistics, expected, response.getValue());
}
// Compares detected primary languages document-by-document.
static void validateDetectLanguageResultCollection(boolean showStatistics,
DetectLanguageResultCollection expected, DetectLanguageResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) ->
validatePrimaryLanguage(expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage()));
}
static void validateCategorizedEntitiesResultCollectionWithResponse(boolean showStatistics,
RecognizeEntitiesResultCollection expected, int expectedStatusCode,
Response<RecognizeEntitiesResultCollection> response) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
validateCategorizedEntitiesResultCollection(showStatistics, expected, response.getValue());
}
// Note: only the ACTUAL entities are structurally validated here (expectedItem is unused);
// validateCategorizedEntity itself asserts non-null fields rather than comparing values.
static void validateCategorizedEntitiesResultCollection(boolean showStatistics,
RecognizeEntitiesResultCollection expected, RecognizeEntitiesResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) ->
validateCategorizedEntities(actualItem.getEntities().stream().collect(Collectors.toList())));
}
static void validatePiiEntitiesResultCollectionWithResponse(boolean showStatistics,
RecognizePiiEntitiesResultCollection expected, int expectedStatusCode,
Response<RecognizePiiEntitiesResultCollection> response) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
validatePiiEntitiesResultCollection(showStatistics, expected, response.getValue());
}
// Compares redacted text plus each PII entity pairwise.
static void validatePiiEntitiesResultCollection(boolean showStatistics,
RecognizePiiEntitiesResultCollection expected, RecognizePiiEntitiesResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> {
final PiiEntityCollection expectedPiiEntityCollection = expectedItem.getEntities();
final PiiEntityCollection actualPiiEntityCollection = actualItem.getEntities();
assertEquals(expectedPiiEntityCollection.getRedactedText(), actualPiiEntityCollection.getRedactedText());
validatePiiEntities(
expectedPiiEntityCollection.stream().collect(Collectors.toList()),
actualPiiEntityCollection.stream().collect(Collectors.toList()));
});
}
static void validateLinkedEntitiesResultCollectionWithResponse(boolean showStatistics,
RecognizeLinkedEntitiesResultCollection expected, int expectedStatusCode,
Response<RecognizeLinkedEntitiesResultCollection> response) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
validateLinkedEntitiesResultCollection(showStatistics, expected, response.getValue());
}
// Compares linked entities pairwise.
static void validateLinkedEntitiesResultCollection(boolean showStatistics,
RecognizeLinkedEntitiesResultCollection expected, RecognizeLinkedEntitiesResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) ->
validateLinkedEntities(
expectedItem.getEntities().stream().collect(Collectors.toList()),
actualItem.getEntities().stream().collect(Collectors.toList())));
}
static void validateExtractKeyPhrasesResultCollectionWithResponse(boolean showStatistics,
ExtractKeyPhrasesResultCollection expected, int expectedStatusCode,
Response<ExtractKeyPhrasesResultCollection> response) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
validateExtractKeyPhrasesResultCollection(showStatistics, expected, response.getValue());
}
/**
 * Validates a classify-document result: IDs must match, error state must match, and for
 * successful documents every classification category must match pairwise.
 *
 * @param expect the expected result.
 * @param actual the result returned by the service.
 */
static void validateClassifyDocumentResult(ClassifyDocumentResult expect, ClassifyDocumentResult actual) {
assertEquals(expect.getId(), actual.getId());
// Fix: the previous assertNotNull(actual.isError()) was vacuous — a boxed value from a
// boolean accessor is never null. Compare the error flags directly instead.
assertEquals(expect.isError(), actual.isError());
if (!expect.isError()) {
assertNull(actual.getError());
List<ClassificationCategory> actualClassifications =
actual.getClassifications().stream().collect(Collectors.toList());
List<ClassificationCategory> expectClassifications =
expect.getClassifications().stream().collect(Collectors.toList());
assertEquals(expectClassifications.size(), actualClassifications.size());
for (int i = 0; i < expectClassifications.size(); i++) {
validateClassificationCategory(expectClassifications.get(i), actualClassifications.get(i));
}
}
}
// Categories must match exactly; confidence scores only need to be present (they vary run to run).
static void validateClassificationCategory(ClassificationCategory expect, ClassificationCategory actual) {
assertEquals(expect.getCategory(), actual.getCategory());
assertNotNull(actual.getConfidenceScore());
}
// Compares key phrases per document (order-insensitively, via validateKeyPhrases).
static void validateExtractKeyPhrasesResultCollection(boolean showStatistics,
ExtractKeyPhrasesResultCollection expected, ExtractKeyPhrasesResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) ->
validateKeyPhrases(
expectedItem.getKeyPhrases().stream().collect(Collectors.toList()),
actualItem.getKeyPhrases().stream().collect(Collectors.toList())));
}
// Checks the HTTP status code then defers to the plain collection validator.
static void validateAnalyzeSentimentResultCollectionWithResponse(boolean showStatistics,
boolean includeOpinionMining, AnalyzeSentimentResultCollection expected,
int expectedStatusCode, Response<AnalyzeSentimentResultCollection> response) {
assertNotNull(response);
assertEquals(expectedStatusCode, response.getStatusCode());
validateAnalyzeSentimentResultCollection(showStatistics, includeOpinionMining, expected, response.getValue());
}
// Compares document sentiment per document, optionally including mined opinions.
static void validateAnalyzeSentimentResultCollection(boolean showStatistics, boolean includeOpinionMining,
AnalyzeSentimentResultCollection expected, AnalyzeSentimentResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) ->
validateDocumentSentiment(includeOpinionMining, expectedItem.getDocumentSentiment(),
actualItem.getDocumentSentiment()));
}
// Compares healthcare entity documents pairwise.
static void validateAnalyzeHealthcareEntitiesResultCollection(boolean showStatistics,
AnalyzeHealthcareEntitiesResultCollection expected, AnalyzeHealthcareEntitiesResultCollection actual) {
validateTextAnalyticsResult(showStatistics, expected, actual,
(expectedItem, actualItem) -> validateHealthcareEntityDocumentResult(expectedItem, actualItem));
}
/**
 * Helper method to validate a single detected language.
 * Name and ISO 639-1 code must match; the confidence score only needs to be present.
 *
 * @param expectedLanguage detectedLanguage returned by the service.
 * @param actualLanguage detectedLanguage returned by the API.
 */
static void validatePrimaryLanguage(DetectedLanguage expectedLanguage, DetectedLanguage actualLanguage) {
assertEquals(expectedLanguage.getName(), actualLanguage.getName());
assertEquals(expectedLanguage.getIso6391Name(), actualLanguage.getIso6391Name());
assertNotNull(actualLanguage.getConfidenceScore());
}
/**
 * Helper method to validate a single categorized entity.
 * Only asserts structural presence of fields — no expected value is compared.
 *
 * @param actualCategorizedEntity CategorizedEntity returned by the API.
 */
static void validateCategorizedEntity(CategorizedEntity actualCategorizedEntity) {
assertNotNull(actualCategorizedEntity.getText());
assertNotNull(actualCategorizedEntity.getOffset());
assertNotNull(actualCategorizedEntity.getCategory());
assertNotNull(actualCategorizedEntity.getConfidenceScore());
}
/**
 * Helper method to validate a single Personally Identifiable Information entity.
 * Offset, subcategory, text and category must match; the score only needs to be present.
 *
 * @param expectedPiiEntity PiiEntity returned by the service.
 * @param actualPiiEntity PiiEntity returned by the API.
 */
static void validatePiiEntity(PiiEntity expectedPiiEntity, PiiEntity actualPiiEntity) {
assertEquals(expectedPiiEntity.getOffset(), actualPiiEntity.getOffset());
assertEquals(expectedPiiEntity.getSubcategory(), actualPiiEntity.getSubcategory());
assertEquals(expectedPiiEntity.getText(), actualPiiEntity.getText());
assertEquals(expectedPiiEntity.getCategory(), actualPiiEntity.getCategory());
assertNotNull(actualPiiEntity.getConfidenceScore());
}
/**
 * Helper method to validate a single linked entity.
 * In playback mode the URL field is sanitized in recordings, so it is compared against the
 * REDACTED placeholder instead of the expected value.
 *
 * @param expectedLinkedEntity LinkedEntity returned by the service.
 * @param actualLinkedEntity LinkedEntity returned by the API.
 */
static void validateLinkedEntity(LinkedEntity expectedLinkedEntity, LinkedEntity actualLinkedEntity) {
assertEquals(expectedLinkedEntity.getName(), actualLinkedEntity.getName());
assertEquals(expectedLinkedEntity.getDataSource(), actualLinkedEntity.getDataSource());
assertEquals(expectedLinkedEntity.getLanguage(), actualLinkedEntity.getLanguage());
if (interceptorManagerTestBase.isPlaybackMode()) {
assertEquals(REDACTED, actualLinkedEntity.getUrl());
} else {
assertEquals(expectedLinkedEntity.getUrl(), actualLinkedEntity.getUrl());
}
assertEquals(expectedLinkedEntity.getDataSourceEntityId(), actualLinkedEntity.getDataSourceEntityId());
validateLinkedEntityMatches(expectedLinkedEntity.getMatches().stream().collect(Collectors.toList()),
actualLinkedEntity.getMatches().stream().collect(Collectors.toList()));
}
/**
 * Helper method to validate key phrases order-insensitively.
 * Sorts defensive copies rather than the arguments themselves, so callers' lists are not
 * mutated as a side effect of validation (the previous version sorted in place).
 *
 * @param expectedKeyPhrases key phrases returned by the service.
 * @param actualKeyPhrases key phrases returned by the API.
 */
static void validateKeyPhrases(List<String> expectedKeyPhrases, List<String> actualKeyPhrases) {
assertEquals(expectedKeyPhrases.size(), actualKeyPhrases.size());
final List<String> expectedSorted = new ArrayList<>(expectedKeyPhrases);
final List<String> actualSorted = new ArrayList<>(actualKeyPhrases);
Collections.sort(expectedSorted);
Collections.sort(actualSorted);
// List equality on the sorted copies is equivalent to the old element-by-element loop.
assertEquals(expectedSorted, actualSorted);
}
/**
 * Helper method to validate the list of categorized entities.
 *
 * @param actualCategorizedEntityList categorizedEntities returned by the API.
 */
static void validateCategorizedEntities(List<CategorizedEntity> actualCategorizedEntityList) {
for (int i = 0; i < actualCategorizedEntityList.size(); i++) {
validateCategorizedEntity(actualCategorizedEntityList.get(i));
}
}
/**
 * Helper method to validate the list of Personally Identifiable Information entities.
 * NOTE(review): sorts both argument lists in place; callers currently pass fresh
 * {@code collect(toList())} copies, so nothing observable is mutated — verify if reused.
 *
 * @param expectedPiiEntityList piiEntities returned by the service.
 * @param actualPiiEntityList piiEntities returned by the API.
 */
static void validatePiiEntities(List<PiiEntity> expectedPiiEntityList, List<PiiEntity> actualPiiEntityList) {
assertEquals(expectedPiiEntityList.size(), actualPiiEntityList.size());
expectedPiiEntityList.sort(Comparator.comparing(PiiEntity::getText));
actualPiiEntityList.sort(Comparator.comparing(PiiEntity::getText));
for (int i = 0; i < expectedPiiEntityList.size(); i++) {
PiiEntity expectedPiiEntity = expectedPiiEntityList.get(i);
PiiEntity actualPiiEntity = actualPiiEntityList.get(i);
validatePiiEntity(expectedPiiEntity, actualPiiEntity);
}
}
/**
 * Helper method to validate the list of linked entities.
 * NOTE(review): sorts both argument lists in place (see validatePiiEntities).
 *
 * @param expectedLinkedEntityList linkedEntities returned by the service.
 * @param actualLinkedEntityList linkedEntities returned by the API.
 */
static void validateLinkedEntities(List<LinkedEntity> expectedLinkedEntityList,
List<LinkedEntity> actualLinkedEntityList) {
assertEquals(expectedLinkedEntityList.size(), actualLinkedEntityList.size());
expectedLinkedEntityList.sort(Comparator.comparing(LinkedEntity::getName));
actualLinkedEntityList.sort(Comparator.comparing(LinkedEntity::getName));
for (int i = 0; i < expectedLinkedEntityList.size(); i++) {
LinkedEntity expectedLinkedEntity = expectedLinkedEntityList.get(i);
LinkedEntity actualLinkedEntity = actualLinkedEntityList.get(i);
validateLinkedEntity(expectedLinkedEntity, actualLinkedEntity);
}
}
/**
 * Helper method to validate the list of sentence sentiment. Can't really validate score numbers because it
 * frequently changed by background model computation.
 *
 * @param expectedSentimentList a list of analyzed sentence sentiment returned by the service.
 * @param actualSentimentList a list of analyzed sentence sentiment returned by the API.
 */
static void validateSentenceSentimentList(boolean includeOpinionMining, List<SentenceSentiment> expectedSentimentList,
List<SentenceSentiment> actualSentimentList) {
assertEquals(expectedSentimentList.size(), actualSentimentList.size());
for (int i = 0; i < expectedSentimentList.size(); i++) {
validateSentenceSentiment(includeOpinionMining, expectedSentimentList.get(i), actualSentimentList.get(i));
}
}
/**
 * Helper method to validate one pair of analyzed sentiments. Can't really validate score numbers because it
 * frequently changed by background model computation.
 * When opinion mining was not requested, the opinions accessor is expected to be null.
 *
 * @param expectedSentiment analyzed sentence sentiment returned by the service.
 * @param actualSentiment analyzed sentence sentiment returned by the API.
 */
static void validateSentenceSentiment(boolean includeOpinionMining, SentenceSentiment expectedSentiment,
SentenceSentiment actualSentiment) {
assertEquals(expectedSentiment.getSentiment(), actualSentiment.getSentiment());
assertEquals(expectedSentiment.getText(), actualSentiment.getText());
assertEquals(expectedSentiment.getOffset(), actualSentiment.getOffset());
assertEquals(expectedSentiment.getLength(), actualSentiment.getLength());
if (includeOpinionMining) {
validateSentenceOpinions(expectedSentiment.getOpinions().stream().collect(Collectors.toList()),
actualSentiment.getOpinions().stream().collect(Collectors.toList()));
} else {
assertNull(actualSentiment.getOpinions());
}
}
/**
* Helper method to validate sentence's opinions.
*
* @param expectedSentenceOpinions a list of sentence opinions returned by the service.
* @param actualSentenceOpinions a list of sentence opinions returned by the API.
*/
static void validateSentenceOpinions(List<SentenceOpinion> expectedSentenceOpinions,
List<SentenceOpinion> actualSentenceOpinions) {
assertEquals(expectedSentenceOpinions.size(), actualSentenceOpinions.size());
for (int i = 0; i < actualSentenceOpinions.size(); i++) {
final SentenceOpinion expectedSentenceOpinion = expectedSentenceOpinions.get(i);
final SentenceOpinion actualSentenceOpinion = actualSentenceOpinions.get(i);
validateTargetSentiment(expectedSentenceOpinion.getTarget(), actualSentenceOpinion.getTarget());
validateAssessmentList(expectedSentenceOpinion.getAssessments().stream().collect(Collectors.toList()),
actualSentenceOpinion.getAssessments().stream().collect(Collectors.toList()));
}
}
    /**
     * Helper method to validate target sentiment. Compares sentiment label, text and offset;
     * confidence scores are not compared because they vary with model updates.
     *
     * @param expected An expected target sentiment.
     * @param actual An actual target sentiment.
     */
    static void validateTargetSentiment(TargetSentiment expected, TargetSentiment actual) {
        assertEquals(expected.getSentiment(), actual.getSentiment());
        assertEquals(expected.getText(), actual.getText());
        assertEquals(expected.getOffset(), actual.getOffset());
    }
    /**
     * Helper method to validate a list of {@link AssessmentSentiment}. Lists are compared
     * pairwise by index after asserting equal sizes.
     *
     * @param expected A list of expected assessment sentiments.
     * @param actual A list of actual assessment sentiments.
     */
    static void validateAssessmentList(List<AssessmentSentiment> expected, List<AssessmentSentiment> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < expected.size(); i++) {
            validateAssessmentSentiment(expected.get(i), actual.get(i));
        }
    }
    /**
     * Helper method to validate assessment sentiment: label, text, negation flag and offset.
     *
     * @param expect An expected assessment sentiment.
     * @param actual An actual assessment sentiment.
     */
    static void validateAssessmentSentiment(AssessmentSentiment expect, AssessmentSentiment actual) {
        assertEquals(expect.getSentiment(), actual.getSentiment());
        assertEquals(expect.getText(), actual.getText());
        assertEquals(expect.isNegated(), actual.isNegated());
        assertEquals(expect.getOffset(), actual.getOffset());
    }
    /**
     * Helper method to validate one pair of analyzed sentiments. Can't really validate score numbers because it
     * frequently changed by background model computation. Compares the document-level label, then
     * delegates per-sentence validation.
     *
     * @param expectedSentiment analyzed document sentiment returned by the service.
     * @param actualSentiment analyzed document sentiment returned by the API.
     */
    static void validateDocumentSentiment(boolean includeOpinionMining, DocumentSentiment expectedSentiment,
        DocumentSentiment actualSentiment) {
        assertEquals(expectedSentiment.getSentiment(), actualSentiment.getSentiment());
        validateSentenceSentimentList(includeOpinionMining,
            expectedSentiment.getSentences().stream().collect(Collectors.toList()),
            actualSentiment.getSentences().stream().collect(Collectors.toList()));
    }
    // Validates a single custom-classification document result: a non-null id, and either an
    // error (error case) or a set of well-formed classifications (success case) — never both.
    static void validateLabelClassificationResult(ClassifyDocumentResult documentResult) {
        assertNotNull(documentResult.getId());
        if (documentResult.isError()) {
            assertNotNull(documentResult.getError());
        } else {
            assertNull(documentResult.getError());
            for (ClassificationCategory classification : documentResult.getClassifications()) {
                validateDocumentClassification(classification);
            }
        }
    }
    // A classification is well-formed when both its category and confidence score are present.
    static void validateDocumentClassification(ClassificationCategory classificationCategory) {
        assertNotNull(classificationCategory.getCategory());
        assertNotNull(classificationCategory.getConfidenceScore());
    }
    // Compares healthcare entity assertions field by field. Identity (including both-null)
    // short-circuits as trivially equal.
    // NOTE(review): if exactly one side is null this throws NullPointerException rather than a
    // clean assertion failure — confirm callers never pass a single-sided null.
    static void validateEntityAssertion(HealthcareEntityAssertion expected, HealthcareEntityAssertion actual) {
        if (actual == expected) {
            return;
        }
        assertEquals(expected.getConditionality(), actual.getConditionality());
        assertEquals(expected.getAssociation(), actual.getAssociation());
        assertEquals(expected.getCertainty(), actual.getCertainty());
    }
static void validateEntityDataSourceList(IterableStream<EntityDataSource> expected,
IterableStream<EntityDataSource> actual) {
if (expected == actual) {
return;
} else if (expected == null || actual == null) {
assertTrue(false);
}
}
    // Validates a healthcare analysis document result by delegating to the relation and
    // entity list validators over materialized copies of the iterable streams.
    static void validateHealthcareEntityDocumentResult(AnalyzeHealthcareEntitiesResult expected,
        AnalyzeHealthcareEntitiesResult actual) {
        validateHealthcareEntityRelations(expected.getEntityRelations().stream().collect(Collectors.toList()),
            actual.getEntityRelations().stream().collect(Collectors.toList()));
        validateHealthcareEntities(expected.getEntities().stream().collect(Collectors.toList()),
            actual.getEntities().stream().collect(Collectors.toList()));
    }
    // Pairwise validation of healthcare entity relations after asserting equal list sizes.
    static void validateHealthcareEntityRelations(List<HealthcareEntityRelation> expected,
        List<HealthcareEntityRelation> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < expected.size(); i++) {
            validateHealthcareEntityRelation(expected.get(i), actual.get(i));
        }
    }
static void validateHealthcareEntityRelation(HealthcareEntityRelation expected, HealthcareEntityRelation actual) {
final List<HealthcareEntityRelationRole> expectedRoles = expected.getRoles().stream().collect(Collectors.toList());
final List<HealthcareEntityRelationRole> actualRoles = actual.getRoles().stream().collect(Collectors.toList());
assertEquals(expected.getRelationType(), actual.getRelationType());
assertNotNull(actual.getConfidenceScore());
for (int i = 0; i < expectedRoles.size(); i++) {
validateHealthcareEntityRelationRole(expectedRoles.get(i), actualRoles.get(i));
}
}
    // A relation role matches when its name matches and its underlying entity validates.
    static void validateHealthcareEntityRelationRole(HealthcareEntityRelationRole expected,
        HealthcareEntityRelationRole actual) {
        assertEquals(expected.getName(), actual.getName());
        validateHealthcareEntity(expected.getEntity(), actual.getEntity());
    }
static void validateHealthcareEntities(List<HealthcareEntity> expected, List<HealthcareEntity> actual) {
assertEquals(expected.size(), actual.size());
expected.sort(Comparator.comparing(HealthcareEntity::getText));
actual.sort(Comparator.comparing(HealthcareEntity::getText));
for (int i = 0; i < expected.size(); i++) {
validateHealthcareEntity(expected.get(i), actual.get(i));
}
}
    // Pairwise validation of healthcare result collections (one collection per request page).
    static void validateAnalyzeHealthcareEntitiesResultCollectionList(boolean showStatistics,
        List<AnalyzeHealthcareEntitiesResultCollection> expected,
        List<AnalyzeHealthcareEntitiesResultCollection> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateAnalyzeHealthcareEntitiesResultCollection(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Pairwise validation of batch "analyze actions" results (one result per request page).
    static void validateAnalyzeBatchActionsResultList(boolean showStatistics, boolean includeOpinionMining,
        List<AnalyzeActionsResult> expected, List<AnalyzeActionsResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateAnalyzeActionsResult(showStatistics, includeOpinionMining, expected.get(i), actual.get(i));
        }
    }
    /**
     * Validates one batch "analyze actions" result by delegating every action family — entity
     * recognition, linked entities, PII, healthcare, key phrases, sentiment, extractive and
     * abstractive summarization — to its dedicated validator. Each iterable stream is
     * materialized into a list before delegation.
     *
     * @param showStatistics whether per-document/batch statistics are expected to be present.
     * @param includeOpinionMining whether sentiment results should carry opinion-mining data.
     * @param expected the actions result the test expects.
     * @param actual the actions result returned by the API.
     */
    static void validateAnalyzeActionsResult(boolean showStatistics, boolean includeOpinionMining,
        AnalyzeActionsResult expected, AnalyzeActionsResult actual) {
        validateRecognizeEntitiesActionResults(showStatistics,
            expected.getRecognizeEntitiesResults().stream().collect(Collectors.toList()),
            actual.getRecognizeEntitiesResults().stream().collect(Collectors.toList()));
        validateRecognizeLinkedEntitiesActionResults(showStatistics,
            expected.getRecognizeLinkedEntitiesResults().stream().collect(Collectors.toList()),
            actual.getRecognizeLinkedEntitiesResults().stream().collect(Collectors.toList()));
        validateRecognizePiiEntitiesActionResults(showStatistics,
            expected.getRecognizePiiEntitiesResults().stream().collect(Collectors.toList()),
            actual.getRecognizePiiEntitiesResults().stream().collect(Collectors.toList()));
        validateAnalyzeHealthcareEntitiesActionResults(showStatistics,
            expected.getAnalyzeHealthcareEntitiesResults().stream().collect(Collectors.toList()),
            actual.getAnalyzeHealthcareEntitiesResults().stream().collect(Collectors.toList()));
        validateExtractKeyPhrasesActionResults(showStatistics,
            expected.getExtractKeyPhrasesResults().stream().collect(Collectors.toList()),
            actual.getExtractKeyPhrasesResults().stream().collect(Collectors.toList()));
        validateAnalyzeSentimentActionResults(showStatistics, includeOpinionMining,
            expected.getAnalyzeSentimentResults().stream().collect(Collectors.toList()),
            actual.getAnalyzeSentimentResults().stream().collect(Collectors.toList()));
        validateExtractiveSummaryActionResults(showStatistics,
            expected.getExtractiveSummaryResults().stream().collect(Collectors.toList()),
            actual.getExtractiveSummaryResults().stream().collect(Collectors.toList()));
        validateAbstractiveSummaryActionResults(showStatistics,
            expected.getAbstractiveSummaryResults().stream().collect(Collectors.toList()),
            actual.getAbstractiveSummaryResults().stream().collect(Collectors.toList()));
    }
static void validateRecognizeEntitiesActionResults(boolean showStatistics,
List<RecognizeEntitiesActionResult> expected, List<RecognizeEntitiesActionResult> actual) {
for (int i = 0; i < actual.size(); i++) {
validateRecognizeEntitiesActionResult(showStatistics, expected.get(i), actual.get(i));
}
}
    // Pairwise validation of linked-entity action results after asserting equal sizes.
    static void validateRecognizeLinkedEntitiesActionResults(boolean showStatistics,
        List<RecognizeLinkedEntitiesActionResult> expected, List<RecognizeLinkedEntitiesActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateRecognizeLinkedEntitiesActionResult(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Pairwise validation of PII-entity action results after asserting equal sizes.
    static void validateRecognizePiiEntitiesActionResults(boolean showStatistics,
        List<RecognizePiiEntitiesActionResult> expected, List<RecognizePiiEntitiesActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateRecognizePiiEntitiesActionResult(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Pairwise validation of healthcare-entity action results after asserting equal sizes.
    static void validateAnalyzeHealthcareEntitiesActionResults(boolean showStatistics,
        List<AnalyzeHealthcareEntitiesActionResult> expected, List<AnalyzeHealthcareEntitiesActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateAnalyzeHealthcareEntitiesActionResult(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Pairwise validation of key-phrase action results after asserting equal sizes.
    static void validateExtractKeyPhrasesActionResults(boolean showStatistics,
        List<ExtractKeyPhrasesActionResult> expected, List<ExtractKeyPhrasesActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateExtractKeyPhrasesActionResult(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Pairwise validation of sentiment action results after asserting equal sizes.
    static void validateAnalyzeSentimentActionResults(boolean showStatistics, boolean includeOpinionMining,
        List<AnalyzeSentimentActionResult> expected, List<AnalyzeSentimentActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateAnalyzeSentimentActionResult(showStatistics, includeOpinionMining, expected.get(i), actual.get(i));
        }
    }
static void validateRecognizeEntitiesActionResult(boolean showStatistics,
RecognizeEntitiesActionResult expected, RecognizeEntitiesActionResult actual) {
assertEquals(expected.isError(), actual.isError());
if (actual.isError()) {
if (expected.getError() == null) {
assertNull(actual.getError());
} else {
assertNotNull(actual.getError());
validateErrorDocument(expected.getError(), actual.getError());
}
} else {
validateCategorizedEntitiesResultCollection(showStatistics, expected.getDocumentsResults(), actual.getDocumentsResults());
}
}
    // Each of the following validate*ActionResult helpers follows the same contract: error
    // state must agree on both sides; on error, error documents are compared (or both must be
    // null); on success, the action's document result collection is validated.
    static void validateRecognizeLinkedEntitiesActionResult(boolean showStatistics,
        RecognizeLinkedEntitiesActionResult expected, RecognizeLinkedEntitiesActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validateLinkedEntitiesResultCollection(showStatistics, expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    // PII-entity variant; see contract note above.
    static void validateRecognizePiiEntitiesActionResult(boolean showStatistics,
        RecognizePiiEntitiesActionResult expected, RecognizePiiEntitiesActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validatePiiEntitiesResultCollection(showStatistics, expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    // Healthcare-entity variant; see contract note above.
    static void validateAnalyzeHealthcareEntitiesActionResult(boolean showStatistics,
        AnalyzeHealthcareEntitiesActionResult expected, AnalyzeHealthcareEntitiesActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validateAnalyzeHealthcareEntitiesResultCollection(showStatistics,
                expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    // Key-phrase variant; see contract note above.
    static void validateExtractKeyPhrasesActionResult(boolean showStatistics,
        ExtractKeyPhrasesActionResult expected, ExtractKeyPhrasesActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validateExtractKeyPhrasesResultCollection(showStatistics, expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    // Sentiment variant; also threads through the opinion-mining flag.
    static void validateAnalyzeSentimentActionResult(boolean showStatistics, boolean includeOpinionMining,
        AnalyzeSentimentActionResult expected, AnalyzeSentimentActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validateAnalyzeSentimentResultCollection(showStatistics, includeOpinionMining,
                expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    /**
     * Helper method to verify {@link TextAnalyticsResult documents} returned in a batch request.
     * Results are keyed by document id (order-insensitive), batch statistics are checked present
     * or absent according to {@code showStatistics}, per-document statistics and errors are
     * compared, and {@code additionalAssertions} is invoked on every expected/actual pair for
     * result-type-specific checks.
     */
    static <T extends TextAnalyticsResult, H extends IterableStream<T>> void validateTextAnalyticsResult(
        boolean showStatistics, H expectedResults, H actualResults, BiConsumer<T, T> additionalAssertions) {
        // Index both sides by document id so comparison does not depend on response ordering.
        final Map<String, T> expected = expectedResults.stream().collect(
            Collectors.toMap(TextAnalyticsResult::getId, r -> r));
        final Map<String, T> actual = actualResults.stream().collect(
            Collectors.toMap(TextAnalyticsResult::getId, r -> r));
        assertEquals(expected.size(), actual.size());
        if (showStatistics) {
            // getStatistics() is declared on each concrete collection type rather than a shared
            // interface, hence the instanceof dispatch over every known collection.
            if (expectedResults instanceof AnalyzeHealthcareEntitiesResultCollection) {
                validateBatchStatistics(((AnalyzeHealthcareEntitiesResultCollection) expectedResults).getStatistics(),
                    ((AnalyzeHealthcareEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof AnalyzeSentimentResultCollection) {
                validateBatchStatistics(((AnalyzeSentimentResultCollection) expectedResults).getStatistics(),
                    ((AnalyzeSentimentResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof ClassifyDocumentResultCollection) {
                validateBatchStatistics(((ClassifyDocumentResultCollection) expectedResults).getStatistics(),
                    ((ClassifyDocumentResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof DetectLanguageResultCollection) {
                validateBatchStatistics(((DetectLanguageResultCollection) expectedResults).getStatistics(),
                    ((DetectLanguageResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof ExtractKeyPhrasesResultCollection) {
                validateBatchStatistics(((ExtractKeyPhrasesResultCollection) expectedResults).getStatistics(),
                    ((ExtractKeyPhrasesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof ExtractiveSummaryResultCollection) {
                validateBatchStatistics(((ExtractiveSummaryResultCollection) expectedResults).getStatistics(),
                    ((ExtractiveSummaryResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizeCustomEntitiesResultCollection) {
                validateBatchStatistics(((RecognizeCustomEntitiesResultCollection) expectedResults).getStatistics(),
                    ((RecognizeCustomEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizeEntitiesResultCollection) {
                validateBatchStatistics(((RecognizeEntitiesResultCollection) expectedResults).getStatistics(),
                    ((RecognizeEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizeLinkedEntitiesResultCollection) {
                validateBatchStatistics(((RecognizeLinkedEntitiesResultCollection) expectedResults).getStatistics(),
                    ((RecognizeLinkedEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizePiiEntitiesResultCollection) {
                validateBatchStatistics(((RecognizePiiEntitiesResultCollection) expectedResults).getStatistics(),
                    ((RecognizePiiEntitiesResultCollection) actualResults).getStatistics());
            }
        } else {
            // When statistics were not requested, the service must not return any.
            if (expectedResults instanceof AnalyzeHealthcareEntitiesResultCollection) {
                assertNull(((AnalyzeHealthcareEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof AnalyzeSentimentResultCollection) {
                assertNull(((AnalyzeSentimentResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof ClassifyDocumentResultCollection) {
                assertNull(((ClassifyDocumentResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof DetectLanguageResultCollection) {
                assertNull(((DetectLanguageResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof ExtractKeyPhrasesResultCollection) {
                assertNull(((ExtractKeyPhrasesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof ExtractiveSummaryResultCollection) {
                assertNull(((ExtractiveSummaryResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizeCustomEntitiesResultCollection) {
                assertNull(((RecognizeCustomEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizeEntitiesResultCollection) {
                assertNull(((RecognizeEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizeLinkedEntitiesResultCollection) {
                assertNull(((RecognizeLinkedEntitiesResultCollection) actualResults).getStatistics());
            } else if (expectedResults instanceof RecognizePiiEntitiesResultCollection) {
                assertNull(((RecognizePiiEntitiesResultCollection) actualResults).getStatistics());
            }
        }
        // Per-document checks: matching id must exist, statistics (when requested) and error
        // documents must agree, then run the caller-supplied assertions.
        expected.forEach((key, expectedValue) -> {
            T actualValue = actual.get(key);
            assertNotNull(actualValue);
            if (showStatistics) {
                validateDocumentStatistics(expectedValue.getStatistics(), actualValue.getStatistics());
            }
            if (expectedValue.getError() == null) {
                assertNull(actualValue.getError());
            } else {
                assertNotNull(actualValue.getError());
                assertEquals(expectedValue.getError().getErrorCode(), actualValue.getError().getErrorCode());
                validateErrorDocument(expectedValue.getError(), actualValue.getError());
            }
            additionalAssertions.accept(expectedValue, actualValue);
        });
    }
    /**
     * Helper method to verify TextBatchStatistics. All four counters must match exactly.
     *
     * @param expectedStatistics the expected value for TextBatchStatistics.
     * @param actualStatistics the value returned by API.
     */
    private static void validateBatchStatistics(TextDocumentBatchStatistics expectedStatistics,
        TextDocumentBatchStatistics actualStatistics) {
        assertEquals(expectedStatistics.getDocumentCount(), actualStatistics.getDocumentCount());
        assertEquals(expectedStatistics.getInvalidDocumentCount(), actualStatistics.getInvalidDocumentCount());
        assertEquals(expectedStatistics.getValidDocumentCount(), actualStatistics.getValidDocumentCount());
        assertEquals(expectedStatistics.getTransactionCount(), actualStatistics.getTransactionCount());
    }
    /**
     * Helper method to verify TextDocumentStatistics: character and transaction counts.
     *
     * @param expected the expected value for TextDocumentStatistics.
     * @param actual the value returned by API.
     */
    private static void validateDocumentStatistics(TextDocumentStatistics expected, TextDocumentStatistics actual) {
        assertEquals(expected.getCharacterCount(), actual.getCharacterCount());
        assertEquals(expected.getTransactionCount(), actual.getTransactionCount());
    }
/**
* Helper method to verify LinkedEntityMatches.
*
* @param expectedLinkedEntityMatches the expected value for LinkedEntityMatches.
* @param actualLinkedEntityMatches the value returned by API.
*/
private static void validateLinkedEntityMatches(List<LinkedEntityMatch> expectedLinkedEntityMatches,
List<LinkedEntityMatch> actualLinkedEntityMatches) {
assertEquals(expectedLinkedEntityMatches.size(), actualLinkedEntityMatches.size());
expectedLinkedEntityMatches.sort(Comparator.comparing(LinkedEntityMatch::getText));
actualLinkedEntityMatches.sort(Comparator.comparing(LinkedEntityMatch::getText));
for (int i = 0; i < expectedLinkedEntityMatches.size(); i++) {
LinkedEntityMatch expectedLinkedEntity = expectedLinkedEntityMatches.get(i);
LinkedEntityMatch actualLinkedEntity = actualLinkedEntityMatches.get(i);
assertEquals(expectedLinkedEntity.getText(), actualLinkedEntity.getText());
assertEquals(expectedLinkedEntity.getOffset(), actualLinkedEntity.getOffset());
assertNotNull(actualLinkedEntity.getConfidenceScore());
}
}
    /**
     * Helper method to verify the error document.
     *
     * @param expectedError the Error returned from the service.
     * @param actualError the Error returned from the API.
     */
    static void validateErrorDocument(TextAnalyticsError expectedError, TextAnalyticsError actualError) {
        assertEquals(expectedError.getErrorCode(), actualError.getErrorCode());
        // The message text is service-generated and may change; only assert it is present.
        assertNotNull(actualError.getMessage());
    }
    // Validates an extractive-summary result collection via the generic batch validator, with a
    // per-document summary-sentence comparison as the type-specific assertion.
    static void validateExtractiveSummaryResultCollection(boolean showStatistics,
        ExtractiveSummaryResultCollection expected, ExtractiveSummaryResultCollection actual) {
        validateTextAnalyticsResult(showStatistics, expected, actual,
            (expectedItem, actualItem) -> validateDocumentExtractiveSummaryResult(expectedItem, actualItem));
    }
    // Compares the extracted sentences of a single document result pairwise.
    static void validateDocumentExtractiveSummaryResult(ExtractiveSummaryResult expect,
        ExtractiveSummaryResult actual) {
        validateExtractiveSummarySentenceList(
            expect.getSentences().stream().collect(Collectors.toList()),
            actual.getSentences().stream().collect(Collectors.toList())
        );
    }
    // Pairwise validation of extractive-summary action results after asserting equal sizes.
    static void validateExtractiveSummaryActionResults(boolean showStatistics,
        List<ExtractiveSummaryActionResult> expected, List<ExtractiveSummaryActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateExtractiveSummaryActionResult(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Same error/success contract as the other validate*ActionResult helpers: error state must
    // agree; on error, error documents are compared; on success, document results are validated.
    static void validateExtractiveSummaryActionResult(boolean showStatistics,
        ExtractiveSummaryActionResult expected,
        ExtractiveSummaryActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validateExtractiveSummaryResultCollection(showStatistics,
                expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    // Pairwise validation of extracted summary sentences after asserting equal sizes.
    static void validateExtractiveSummarySentenceList(List<ExtractiveSummarySentence> expect,
        List<ExtractiveSummarySentence> actual) {
        assertEquals(expect.size(), actual.size());
        for (int i = 0; i < expect.size(); i++) {
            validateExtractiveSummarySentence(expect.get(i), actual.get(i));
        }
    }
    // Text, offset and length must match; the rank score is only asserted present because its
    // value varies with model updates.
    static void validateExtractiveSummarySentence(ExtractiveSummarySentence expect, ExtractiveSummarySentence actual) {
        assertEquals(expect.getText(), actual.getText());
        assertEquals(expect.getOffset(), actual.getOffset());
        assertEquals(expect.getLength(), actual.getLength());
        assertNotNull(actual.getRankScore());
    }
static boolean isAscendingOrderByOffSet(List<ExtractiveSummarySentence> extractiveSummarySentences) {
int currMin = Integer.MIN_VALUE;
for (ExtractiveSummarySentence extractiveSummarySentence : extractiveSummarySentences) {
if (extractiveSummarySentence.getOffset() <= currMin) {
return false;
} else {
currMin = extractiveSummarySentence.getOffset();
}
}
return true;
}
static boolean isDescendingOrderByRankScore(List<ExtractiveSummarySentence> extractiveSummarySentences) {
double currentMax = Double.MAX_VALUE;
for (ExtractiveSummarySentence extractiveSummarySentence : extractiveSummarySentences) {
if (extractiveSummarySentence.getRankScore() > currentMax) {
return false;
} else {
currentMax = extractiveSummarySentence.getRankScore();
}
}
return true;
}
    // Pairwise validation of abstractive-summary action results after asserting equal sizes.
    static void validateAbstractiveSummaryActionResults(boolean showStatistics,
        List<AbstractiveSummaryActionResult> expected, List<AbstractiveSummaryActionResult> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < actual.size(); i++) {
            validateAbstractiveSummaryActionResult(showStatistics, expected.get(i), actual.get(i));
        }
    }
    // Same error/success contract as the other validate*ActionResult helpers.
    static void validateAbstractiveSummaryActionResult(boolean showStatistics,
        AbstractiveSummaryActionResult expected, AbstractiveSummaryActionResult actual) {
        assertEquals(expected.isError(), actual.isError());
        if (actual.isError()) {
            if (expected.getError() == null) {
                assertNull(actual.getError());
            } else {
                assertNotNull(actual.getError());
                validateErrorDocument(expected.getError(), actual.getError());
            }
        } else {
            validateAbstractiveSummaryResultCollection(showStatistics,
                expected.getDocumentsResults(), actual.getDocumentsResults());
        }
    }
    // Validates an abstractive-summary result collection via the generic batch validator. Only
    // the actual item is inspected per document: generated summaries are non-deterministic, so
    // there is no meaningful expected text to compare against.
    static void validateAbstractiveSummaryResultCollection(boolean showStatistics,
        AbstractiveSummaryResultCollection expected, AbstractiveSummaryResultCollection actual) {
        validateTextAnalyticsResult(showStatistics, expected, actual,
            (expectedItem, actualItem) -> validateDocumentAbstractiveSummaryResult(actualItem));
    }
    // Checks the shape of the summaries of one abstractive-summary document result.
    static void validateDocumentAbstractiveSummaryResult(AbstractiveSummaryResult actual) {
        validateAbstractiveSummaries(actual.getSummaries().stream().collect(Collectors.toList()));
    }
static void validateAbstractiveSummaries(List<AbstractiveSummary> actual) {
for (int i = 0; i < actual.size(); i++) {
final AbstractiveSummary abstractiveSummary = actual.get(i);
assertNotNull(abstractiveSummary.getText());
validateSummaryContextList(
abstractiveSummary.getContexts().stream().collect(Collectors.toList()));
}
}
static void validateSummaryContextList(List<AbstractiveSummaryContext> actual) {
for (int i = 0; i < actual.size(); i++) {
AbstractiveSummaryContext actualAbstractiveSummaryContext = actual.get(i);
assertNotNull(actualAbstractiveSummaryContext.getOffset());
assertNotNull(actualAbstractiveSummaryContext.getLength());
}
}
} | class TextAnalyticsClientTestBase extends TestProxyTestBase {
    // Expected exception-message templates asserted by the concrete test classes.
    static final String BATCH_ERROR_EXCEPTION_MESSAGE = "Error in accessing the property on document id: 2, when %s returned with an error: Document text is empty. ErrorCodeValue: {InvalidDocument}";
    static final String INVALID_DOCUMENT_BATCH_NPE_MESSAGE = "'documents' cannot be null.";
    static final String INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE = "'documents' cannot be empty.";
    static final String INVALID_DOCUMENT_NPE_MESSAGE = "'document' cannot be null.";
    static final String REDACTED = "REDACTED";
    // Endpoints, API keys and custom-model project/deployment names, read from environment
    // variables via the global Configuration. Presumably null when the variable is unset —
    // TODO confirm how unset values are handled by the tests that use them.
    static final String AZURE_TEXT_ANALYTICS_ENDPOINT =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_ENDPOINT");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_ENDPOINT =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_ENDPOINT");
    static final String AZURE_TEXT_ANALYTICS_API_KEY =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_API_KEY");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_API_KEY =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_API_KEY");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME");
    static final String AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME =
        Configuration.getGlobalConfiguration().get("AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME");
    // Snapshot of the interceptor manager, captured in beforeTest for static access.
    static InterceptorManager interceptorManagerTestBase;
    // Poll interval for the current test mode; set in beforeTest.
    Duration durationTestMode;
/**
* Use duration of nearly zero value for PLAYBACK test mode, otherwise, use default duration value for LIVE mode.
*/
@Override
protected void beforeTest() {
if (interceptorManager.isPlaybackMode()) {
durationTestMode = Duration.ofMillis(1);
} else {
durationTestMode = DEFAULT_POLL_INTERVAL;
}
interceptorManagerTestBase = interceptorManager;
}
    /**
     * Applies the test-mode poll interval (set in {@code beforeTest}) to the given poller.
     *
     * @param syncPoller the poller to configure.
     * @return the poller returned by {@code SyncPoller#setPollInterval}.
     */
    protected <T, U> SyncPoller<T, U> setPollInterval(SyncPoller<T, U> syncPoller) {
        return syncPoller.setPollInterval(durationTestMode);
    }
@Test
abstract void detectLanguagesBatchInputShowStatistics(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchListCountryHint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
@Test
abstract void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: categorized entity recognition. -----------------
    // Includes Unicode/offset tests (emoji, skin-tone modifiers, ZWJ families,
    // NFC/NFD diacritics, Korean syllables, zalgo text).
    @Test
    abstract void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesBatchInputSingleError(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesForListWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    // NOTE(review): "WIth" typo is part of the overridden method name across the
    // test hierarchy; renaming would break every subclass override.
    @Test
    abstract void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: PII entity recognition. -------------------------
    // Covers batch/string overloads, Unicode handling, and domain/category
    // filtering of PII results.
    @Test
    abstract void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForDomainFilter(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: linked entity recognition. ----------------------
    @Test
    abstract void recognizeLinkedEntitiesForTextInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: key phrase extraction. --------------------------
    @Test
    abstract void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: sentiment analysis. -----------------------------
    // Matrix over statistics on/off and opinion mining on/off, plus null-options
    // overloads and Unicode handling.
    @Test
    abstract void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: healthcare entity analysis (long-running op). ---
    @Test
    abstract void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesEmoji(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
    // --- Abstract test cases: multi-action analysis, custom models, and
    // summarization (extractive/abstractive). ----------------------------------
    @Test
    abstract void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeActionsWithMultiSameKindActions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeCustomEntitiesAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void singleLabelClassificationAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void multiCategoryClassifyAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void singleLabelClassificationStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void multiLabelClassificationStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(
        HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeExtractSummaryActionWithNonDefaultSentenceCount(
        HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(
        HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);

    @Test
    abstract void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion);
void detectLanguageShowStatisticsRunner(BiConsumer<List<DetectLanguageInput>,
TextAnalyticsRequestOptions> testRunner) {
final List<DetectLanguageInput> detectLanguageInputs = TestUtils.getDetectLanguageInputs();
TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
testRunner.accept(detectLanguageInputs, options);
}
    // Supplies documents containing duplicate ids; options are intentionally null.
    void detectLanguageDuplicateIdRunner(BiConsumer<List<DetectLanguageInput>,
        TextAnalyticsRequestOptions> testRunner) {
        testRunner.accept(TestUtils.getDuplicateIdDetectLanguageInputs(), null);
    }

    // Supplies the shared string documents with a "US" country hint.
    void detectLanguagesCountryHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(DETECT_LANGUAGE_INPUTS, "US");
    }

    // Supplies the shared string documents with statistics enabled.
    void detectLanguagesBatchListCountryHintWithOptionsRunner(BiConsumer<List<String>,
        TextAnalyticsRequestOptions> testRunner) {
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
        testRunner.accept(DETECT_LANGUAGE_INPUTS, options);
    }

    // Supplies the shared string documents only.
    void detectLanguageStringInputRunner(Consumer<List<String>> testRunner) {
        testRunner.accept(DETECT_LANGUAGE_INPUTS);
    }

    // Supplies the shared DetectLanguageInput documents only.
    void detectLanguageRunner(Consumer<List<DetectLanguageInput>> testRunner) {
        testRunner.accept(TestUtils.getDetectLanguageInputs());
    }

    // Supplies a single document (the first shared input).
    void detectSingleTextLanguageRunner(Consumer<String> testRunner) {
        testRunner.accept(DETECT_LANGUAGE_INPUTS.get(0));
    }

    // "en" is a language code, not a country code — used as an invalid country hint.
    void detectLanguageInvalidCountryHintRunner(BiConsumer<String, String> testRunner) {
        testRunner.accept(DETECT_LANGUAGE_INPUTS.get(1), "en");
    }

    // Empty-string country hint.
    void detectLanguageEmptyCountryHintRunner(BiConsumer<String, String> testRunner) {
        testRunner.accept(DETECT_LANGUAGE_INPUTS.get(1), "");
    }

    // "none" country hint (service sentinel for "no hint").
    void detectLanguageNoneCountryHintRunner(BiConsumer<String, String> testRunner) {
        testRunner.accept(DETECT_LANGUAGE_INPUTS.get(1), "none");
    }
    // --- Input runners: categorized entity recognition. -----------------------

    // Single document from the shared categorized-entity inputs.
    void recognizeCategorizedEntitiesForSingleTextInputRunner(Consumer<String> testRunner) {
        testRunner.accept(CATEGORIZED_ENTITY_INPUTS.get(0));
    }

    void recognizeCategorizedEntityStringInputRunner(Consumer<List<String>> testRunner) {
        testRunner.accept(CATEGORIZED_ENTITY_INPUTS);
    }

    void recognizeCategorizedEntitiesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(CATEGORIZED_ENTITY_INPUTS, "en");
    }

    // One whitespace-only document, expected to produce a per-document error.
    void recognizeBatchCategorizedEntitySingleErrorRunner(Consumer<List<TextDocumentInput>> testRunner) {
        List<TextDocumentInput> inputs = Collections.singletonList(new TextDocumentInput("2", " "));
        testRunner.accept(inputs);
    }

    void recognizeBatchCategorizedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(TestUtils.getTextDocumentInputs(CATEGORIZED_ENTITY_INPUTS));
    }

    // Batch documents with statistics enabled.
    void recognizeBatchCategorizedEntitiesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(CATEGORIZED_ENTITY_INPUTS);
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
        testRunner.accept(textDocumentInputs, options);
    }

    // String documents with statistics enabled.
    void recognizeStringBatchCategorizedEntitiesShowStatsRunner(
        BiConsumer<List<String>, TextAnalyticsRequestOptions> testRunner) {
        testRunner.accept(CATEGORIZED_ENTITY_INPUTS, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
    }
    // --- Input runners: PII entity recognition. -------------------------------

    void recognizePiiSingleDocumentRunner(Consumer<String> testRunner) {
        testRunner.accept(PII_ENTITY_INPUTS.get(0));
    }

    // Restricts results to the protected-health-information (PHI) domain.
    void recognizePiiDomainFilterRunner(BiConsumer<String, RecognizePiiEntitiesOptions> testRunner) {
        testRunner.accept(PII_ENTITY_INPUTS.get(0),
            new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
    }

    // NOTE(review): identical to recognizePiiEntitiesLanguageHintRunner below;
    // kept because both names may be referenced by subclasses — consider merging.
    void recognizePiiLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(PII_ENTITY_INPUTS, "en");
    }

    void recognizePiiEntitiesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(PII_ENTITY_INPUTS, "en");
    }

    // One whitespace-only document, expected to produce a per-document error.
    void recognizeBatchPiiEntitySingleErrorRunner(Consumer<List<TextDocumentInput>> testRunner) {
        List<TextDocumentInput> inputs = Collections.singletonList(new TextDocumentInput("2", " "));
        testRunner.accept(inputs);
    }

    void recognizeBatchPiiEntitiesRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(TestUtils.getTextDocumentInputs(PII_ENTITY_INPUTS));
    }

    // Batch documents with statistics enabled.
    void recognizeBatchPiiEntitiesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, RecognizePiiEntitiesOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(PII_ENTITY_INPUTS);
        RecognizePiiEntitiesOptions options = new RecognizePiiEntitiesOptions().setIncludeStatistics(true);
        testRunner.accept(textDocumentInputs, options);
    }

    void recognizeStringBatchPiiEntitiesShowStatsRunner(
        BiConsumer<List<String>, RecognizePiiEntitiesOptions> testRunner) {
        testRunner.accept(PII_ENTITY_INPUTS, new RecognizePiiEntitiesOptions().setIncludeStatistics(true));
    }

    // Restricts results to SSN and ABA routing number categories.
    void recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
        BiConsumer<List<String>, RecognizePiiEntitiesOptions> testRunner) {
        testRunner.accept(PII_ENTITY_INPUTS,
            new RecognizePiiEntitiesOptions().setCategoriesFilter(
                PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER, PiiEntityCategory.ABA_ROUTING_NUMBER));
    }
    // --- Input runners: linked entity recognition. ----------------------------

    void recognizeLinkedEntitiesForSingleTextInputRunner(Consumer<String> testRunner) {
        testRunner.accept(LINKED_ENTITY_INPUTS.get(0));
    }

    // String documents with statistics enabled.
    void recognizeBatchStringLinkedEntitiesShowStatsRunner(
        BiConsumer<List<String>, TextAnalyticsRequestOptions> testRunner) {
        testRunner.accept(LINKED_ENTITY_INPUTS, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
    }

    // Batch documents with statistics enabled.
    void recognizeBatchLinkedEntitiesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
        testRunner.accept(TestUtils.getTextDocumentInputs(LINKED_ENTITY_INPUTS), options);
    }

    void recognizeLinkedLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(LINKED_ENTITY_INPUTS, "en");
    }

    void recognizeLinkedStringInputRunner(Consumer<List<String>> testRunner) {
        testRunner.accept(LINKED_ENTITY_INPUTS);
    }

    void recognizeBatchLinkedEntityRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(TestUtils.getTextDocumentInputs(LINKED_ENTITY_INPUTS));
    }
    // --- Input runners: key phrase extraction. --------------------------------

    // Note: uses the SECOND shared document (index 1), unlike other single-input
    // runners which use index 0.
    void extractKeyPhrasesForSingleTextInputRunner(Consumer<String> testRunner) {
        testRunner.accept(KEY_PHRASE_INPUTS.get(1));
    }

    void extractBatchStringKeyPhrasesShowStatsRunner(BiConsumer<List<String>, TextAnalyticsRequestOptions> testRunner) {
        testRunner.accept(KEY_PHRASE_INPUTS, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
    }

    void extractBatchKeyPhrasesShowStatsRunner(
        BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(KEY_PHRASE_INPUTS);
        TextAnalyticsRequestOptions options = new TextAnalyticsRequestOptions().setIncludeStatistics(true);
        testRunner.accept(textDocumentInputs, options);
    }

    void extractKeyPhrasesLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(KEY_PHRASE_INPUTS, "en");
    }

    void extractKeyPhrasesStringInputRunner(Consumer<List<String>> testRunner) {
        testRunner.accept(KEY_PHRASE_INPUTS);
    }

    void extractBatchKeyPhrasesRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(TestUtils.getTextDocumentInputs(KEY_PHRASE_INPUTS));
    }

    // Shared runner for duplicate-id error tests across operations.
    void duplicateIdRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(getDuplicateTextDocumentInputs());
    }
    // --- Input runners: sentiment analysis. -----------------------------------

    void analyzeSentimentForSingleTextInputRunner(Consumer<String> testRunner) {
        testRunner.accept(SENTIMENT_INPUTS.get(0));
    }

    // Single document with opinion mining enabled.
    void analyzeSentimentForTextInputWithOpinionMiningRunner(BiConsumer<String, AnalyzeSentimentOptions> testRunner) {
        testRunner.accept(SENTIMENT_INPUTS.get(0), new AnalyzeSentimentOptions().setIncludeOpinionMining(true));
    }

    void analyzeSentimentLanguageHintRunner(BiConsumer<List<String>, String> testRunner) {
        testRunner.accept(SENTIMENT_INPUTS, "en");
    }

    void analyzeSentimentStringInputRunner(Consumer<List<String>> testRunner) {
        testRunner.accept(SENTIMENT_INPUTS);
    }

    void analyzeBatchSentimentRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(TestUtils.getTextDocumentInputs(SENTIMENT_INPUTS));
    }

    // String documents with both statistics and opinion mining enabled.
    void analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner(BiConsumer<List<String>, AnalyzeSentimentOptions> testRunner) {
        testRunner.accept(SENTIMENT_INPUTS,
            new AnalyzeSentimentOptions().setIncludeStatistics(true).setIncludeOpinionMining(true));
    }

    // Batch documents with statistics only.
    void analyzeBatchSentimentShowStatsRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsRequestOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(SENTIMENT_INPUTS);
        testRunner.accept(textDocumentInputs, new TextAnalyticsRequestOptions().setIncludeStatistics(true));
    }

    // Batch documents with both opinion mining and statistics enabled.
    void analyzeBatchSentimentOpinionMining(BiConsumer<List<TextDocumentInput>, AnalyzeSentimentOptions> testRunner) {
        final List<TextDocumentInput> textDocumentInputs = TestUtils.getTextDocumentInputs(SENTIMENT_INPUTS);
        testRunner.accept(textDocumentInputs, new AnalyzeSentimentOptions().setIncludeOpinionMining(true)
            .setIncludeStatistics(true));
    }
    // --- Input runners: boundary and error inputs. ----------------------------

    // Empty document text.
    void emptyTextRunner(Consumer<String> testRunner) {
        testRunner.accept("");
    }

    // Empty batch plus the expected service error message.
    void emptyListRunner(BiConsumer<List<TextDocumentInput>, String> testRunner) {
        testRunner.accept(new ArrayList<>(), "'documents' cannot be empty.");
    }

    // Document with an empty id (detect-language variant).
    void detectLanguageInputEmptyIdRunner(Consumer<List<DetectLanguageInput>> testRunner) {
        testRunner.accept(asList(new DetectLanguageInput("", DETECT_LANGUAGE_INPUTS.get(0))));
    }

    // Document with an empty id (generic TextDocumentInput variant).
    void emptyDocumentIdRunner(Consumer<List<TextDocumentInput>> testRunner) {
        testRunner.accept(asList(new TextDocumentInput("", CATEGORIZED_ENTITY_INPUTS.get(0))));
    }

    // Twelve copies of the same document — exceeds the per-request document limit
    // exercised by the *TooManyDocuments tests.
    void tooManyDocumentsRunner(Consumer<List<String>> testRunner) {
        final String documentInput = CATEGORIZED_ENTITY_INPUTS.get(0);
        testRunner.accept(asList(
            documentInput, documentInput, documentInput, documentInput, documentInput, documentInput,
            documentInput, documentInput, documentInput, documentInput, documentInput, documentInput));
    }
    // --- Input runners: Unicode prefixes for offset/length handling tests. ----
    // Each prefixes the caller's text with characters that stress string-index
    // semantics (surrogate pairs, combining marks, normalization forms).

    // Single emoji prefix (U+1F469 WOMAN — a surrogate pair in UTF-16).
    void emojiRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("👩 " + text);
    }

    void batchEmojiRunner(Consumer<List<TextDocumentInput>> testRunner, String text) {
        testRunner.accept(Collections.singletonList(new TextDocumentInput("0", "👩 " + text)));
    }

    // Emoji followed by a skin-tone modifier (extra code point).
    void emojiWithSkinToneModifierRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("👩🏻 " + text);
    }

    // NOTE(review): the family literals presumably contain U+200D zero-width
    // joiners between the people emoji — confirm the joiners survived editing.
    void emojiFamilyRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("👩👩👧👧 " + text);
    }

    void emojiFamilyWithSkinToneModifierRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("👩🏻👩🏽👧🏾👦🏿 " + text);
    }

    // NOTE(review): the Nfc/Nfd literal pairs below render identically; they are
    // expected to differ only in Unicode normalization form (precomposed vs
    // combining characters) — verify the encodings are actually distinct.
    void diacriticsNfcRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("año " + text);
    }

    void diacriticsNfdRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("año " + text);
    }

    void koreanNfcRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("아가 " + text);
    }

    void koreanNfdRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("아가 " + text);
    }

    // Heavily stacked combining marks ("zalgo" text).
    void zalgoTextRunner(Consumer<String> testRunner, String text) {
        testRunner.accept("ơ̵̧̧̢̳̘̘͕͔͕̭̟̙͎͈̞͔̈̇̒̃͋̇̅͛̋͛̎́͑̄̐̂̎͗͝m̵͍͉̗̄̏͌̂̑̽̕͝͠g̵̢̡̢̡̨̡̧̛͉̞̯̠̤̣͕̟̫̫̼̰͓̦͖̣̣͎̋͒̈́̓̒̈̍̌̓̅͑̒̓̅̅͒̿̏́͗̀̇͛̏̀̈́̀̊̾̀̔͜͠͝ͅ " + text);
    }
    // --- Input runners: healthcare entity analysis. ---------------------------

    // Shared healthcare string documents with statistics enabled.
    void healthcareStringInputRunner(BiConsumer<List<String>, AnalyzeHealthcareEntitiesOptions> testRunner) {
        testRunner.accept(HEALTHCARE_INPUTS, new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
    }

    // Two-document batch (ids "0" and "1") with statistics enabled.
    void healthcareLroRunner(BiConsumer<List<TextDocumentInput>, AnalyzeHealthcareEntitiesOptions> testRunner) {
        testRunner.accept(
            asList(
                new TextDocumentInput("0", HEALTHCARE_INPUTS.get(0)),
                new TextDocumentInput("1", HEALTHCARE_INPUTS.get(1))),
            new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
    }
void healthcareLroPaginationRunner(
BiConsumer<List<TextDocumentInput>, AnalyzeHealthcareEntitiesOptions> testRunner, int totalDocuments) {
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < totalDocuments; i++) {
documents.add(new TextDocumentInput(Integer.toString(i), HEALTHCARE_INPUTS.get(0)));
}
testRunner.accept(documents, new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(true));
}
    // Single clinical document that exercises assertion detection (negation,
    // conditionality); statistics are explicitly disabled.
    void analyzeHealthcareEntitiesForAssertionRunner(
        BiConsumer<List<String>, AnalyzeHealthcareEntitiesOptions> testRunner) {
        testRunner.accept(asList(
            "All female participants that are premenopausal will be required to have a pregnancy test; "
                + "any participant who is pregnant or breastfeeding will not be included"),
            new AnalyzeHealthcareEntitiesOptions().setIncludeStatistics(false));
    }
void cancelHealthcareLroRunner(BiConsumer<List<TextDocumentInput>, AnalyzeHealthcareEntitiesOptions> testRunner) {
List<TextDocumentInput> documents = new ArrayList<>();
for (int i = 0; i < 10; i++) {
documents.add(new TextDocumentInput(Integer.toString(i), HEALTHCARE_INPUTS.get(0)));
}
testRunner.accept(documents, new AnalyzeHealthcareEntitiesOptions());
}
void analyzeActionsStringInputRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
CATEGORIZED_ENTITY_INPUTS.get(0),
PII_ENTITY_INPUTS.get(0)),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizeEntitiesActions(new RecognizeEntitiesAction())
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction())
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction())
.setAnalyzeSentimentActions(new AnalyzeSentimentAction()));
}
void analyzeBatchActionsRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
testRunner.accept(
asList(
new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)),
new TextDocumentInput("1", PII_ENTITY_INPUTS.get(0))),
new TextAnalyticsActions()
.setDisplayName("Test1")
.setRecognizeEntitiesActions(new RecognizeEntitiesAction())
.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction())
.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction())
.setAnalyzeSentimentActions(new AnalyzeSentimentAction())
);
}
/**
 * Runner exercising multiple actions of the same kind: each category gets one
 * action carrying the shared custom name plus one unnamed default action.
 */
void analyzeActionsWithMultiSameKindActionsRunner(
    BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
    List<TextDocumentInput> documents = asList(
        new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)),
        new TextDocumentInput("1", PII_ENTITY_INPUTS.get(0)));
    TextAnalyticsActions actions = new TextAnalyticsActions();
    actions.setRecognizeEntitiesActions(
        new RecognizeEntitiesAction().setActionName(CUSTOM_ACTION_NAME), new RecognizeEntitiesAction());
    actions.setRecognizePiiEntitiesActions(
        new RecognizePiiEntitiesAction().setActionName(CUSTOM_ACTION_NAME), new RecognizePiiEntitiesAction());
    actions.setExtractKeyPhrasesActions(
        new ExtractKeyPhrasesAction().setActionName(CUSTOM_ACTION_NAME), new ExtractKeyPhrasesAction());
    actions.setRecognizeLinkedEntitiesActions(
        new RecognizeLinkedEntitiesAction().setActionName(CUSTOM_ACTION_NAME),
        new RecognizeLinkedEntitiesAction());
    actions.setAnalyzeSentimentActions(
        new AnalyzeSentimentAction().setActionName(CUSTOM_ACTION_NAME), new AnalyzeSentimentAction());
    testRunner.accept(documents, actions);
}
/**
 * Runner assigning the shared custom action name to one action of each kind
 * for a single document.
 */
void analyzeActionsWithActionNamesRunner(
    BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
    List<TextDocumentInput> documents = asList(new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)));
    TextAnalyticsActions actions = new TextAnalyticsActions()
        .setRecognizeEntitiesActions(new RecognizeEntitiesAction().setActionName(CUSTOM_ACTION_NAME))
        .setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction().setActionName(CUSTOM_ACTION_NAME))
        .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction().setActionName(CUSTOM_ACTION_NAME))
        .setAnalyzeSentimentActions(new AnalyzeSentimentAction().setActionName(CUSTOM_ACTION_NAME));
    testRunner.accept(documents, actions);
}
/**
 * Runner building {@code documentsInTotal} PII documents to drive paging of
 * batch-action results across all five action kinds.
 */
void analyzeBatchActionsPaginationRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner,
    int documentsInTotal) {
    List<TextDocumentInput> documents = new ArrayList<>(documentsInTotal);
    for (int id = 0; id < documentsInTotal; id++) {
        documents.add(new TextDocumentInput(String.valueOf(id), PII_ENTITY_INPUTS.get(0)));
    }
    TextAnalyticsActions actions = new TextAnalyticsActions().setDisplayName("Test1");
    actions.setRecognizeEntitiesActions(new RecognizeEntitiesAction());
    actions.setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction());
    actions.setRecognizePiiEntitiesActions(new RecognizePiiEntitiesAction());
    actions.setRecognizeLinkedEntitiesActions(new RecognizeLinkedEntitiesAction());
    actions.setAnalyzeSentimentActions(new AnalyzeSentimentAction());
    testRunner.accept(documents, actions);
}
/** Runner with two documents and a single recognize-entities action. */
void analyzeEntitiesRecognitionRunner(BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
    List<TextDocumentInput> documents = asList(
        new TextDocumentInput("0", CATEGORIZED_ENTITY_INPUTS.get(0)),
        new TextDocumentInput("1", PII_ENTITY_INPUTS.get(0)));
    TextAnalyticsActions actions = new TextAnalyticsActions()
        .setDisplayName("Test1")
        .setRecognizeEntitiesActions(new RecognizeEntitiesAction());
    testRunner.accept(documents, actions);
}
/**
 * Runner whose PII action filters results to the SSN and ABA routing-number
 * categories only.
 */
void analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
    BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
    List<TextDocumentInput> documents = asList(
        new TextDocumentInput("0", PII_ENTITY_INPUTS.get(0)),
        new TextDocumentInput("1", PII_ENTITY_INPUTS.get(1)));
    RecognizePiiEntitiesAction piiAction = new RecognizePiiEntitiesAction()
        .setCategoriesFilter(PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER, PiiEntityCategory.ABA_ROUTING_NUMBER);
    testRunner.accept(documents,
        new TextAnalyticsActions().setDisplayName("Test1").setRecognizePiiEntitiesActions(piiAction));
}
/**
 * Runner whose PII action restricts recognition to the protected-health
 * information (PHI) domain.
 */
void analyzePiiEntityRecognitionWithDomainFiltersRunner(
    BiConsumer<List<TextDocumentInput>, TextAnalyticsActions> testRunner) {
    List<TextDocumentInput> documents = asList(
        new TextDocumentInput("0", PII_ENTITY_INPUTS.get(0)),
        new TextDocumentInput("1", PII_ENTITY_INPUTS.get(1)));
    RecognizePiiEntitiesAction piiAction = new RecognizePiiEntitiesAction()
        .setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION);
    testRunner.accept(documents,
        new TextAnalyticsActions().setDisplayName("Test1").setRecognizePiiEntitiesActions(piiAction));
}
/** Runner with the shared linked-entity inputs and one linked-entities action. */
void analyzeLinkedEntityRecognitionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    TextAnalyticsActions actions = new TextAnalyticsActions()
        .setDisplayName("Test1")
        .setRecognizeLinkedEntitiesActions(new RecognizeLinkedEntitiesAction());
    testRunner.accept(LINKED_ENTITY_INPUTS, actions);
}
/** Runner with two documents and a single extract-key-phrases action. */
void extractKeyPhrasesRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    List<String> documents = asList(CATEGORIZED_ENTITY_INPUTS.get(0), PII_ENTITY_INPUTS.get(0));
    TextAnalyticsActions actions = new TextAnalyticsActions()
        .setDisplayName("Test1")
        .setExtractKeyPhrasesActions(new ExtractKeyPhrasesAction());
    testRunner.accept(documents, actions);
}
/** Runner with the shared sentiment inputs and a single sentiment action. */
void analyzeSentimentRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    TextAnalyticsActions actions =
        new TextAnalyticsActions().setAnalyzeSentimentActions(new AnalyzeSentimentAction());
    testRunner.accept(SENTIMENT_INPUTS, actions);
}
/** Runner with the shared healthcare inputs and a single healthcare action. */
void analyzeHealthcareEntitiesRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    TextAnalyticsActions actions = new TextAnalyticsActions()
        .setAnalyzeHealthcareEntitiesActions(new AnalyzeHealthcareEntitiesAction());
    testRunner.accept(HEALTHCARE_INPUTS, actions);
}
/** Runner for the custom-entities action against the pre-deployed project. */
void recognizeCustomEntitiesActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    RecognizeCustomEntitiesAction action = new RecognizeCustomEntitiesAction(
        AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME,
        AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME);
    testRunner.accept(CUSTOM_ENTITIES_INPUT,
        new TextAnalyticsActions().setRecognizeCustomEntitiesActions(action));
}
/** Runner for the single-label classify action against the pre-deployed project. */
void classifyCustomSingleCategoryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    SingleLabelClassifyAction action = new SingleLabelClassifyAction(
        AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME,
        AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME);
    testRunner.accept(CUSTOM_SINGLE_CLASSIFICATION,
        new TextAnalyticsActions().setSingleLabelClassifyActions(action));
}
/** Runner for the multi-label classify action against the pre-deployed project. */
void classifyCustomMultiCategoryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner) {
    MultiLabelClassifyAction action = new MultiLabelClassifyAction(
        AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME,
        AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME);
    testRunner.accept(CUSTOM_MULTI_CLASSIFICATION,
        new TextAnalyticsActions().setMultiLabelClassifyActions(action));
}
/** Runner supplying the custom-entities input plus its project/deployment pair. */
void recognizeCustomEntitiesRunner(BiConsumer<List<String>, List<String>> testRunner) {
    List<String> projectAndDeployment = asList(
        AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_PROJECT_NAME,
        AZURE_TEXT_ANALYTICS_CUSTOM_ENTITIES_DEPLOYMENT_NAME);
    testRunner.accept(CUSTOM_ENTITIES_INPUT, projectAndDeployment);
}
/** Runner supplying the single-label input plus its project/deployment pair. */
void classifyCustomSingleLabelRunner(BiConsumer<List<String>, List<String>> testRunner) {
    List<String> projectAndDeployment = asList(
        AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_PROJECT_NAME,
        AZURE_TEXT_ANALYTICS_CUSTOM_SINGLE_CLASSIFICATION_DEPLOYMENT_NAME);
    testRunner.accept(CUSTOM_SINGLE_CLASSIFICATION, projectAndDeployment);
}
/** Runner supplying the multi-label input plus its project/deployment pair. */
void classifyCustomMultiLabelRunner(BiConsumer<List<String>, List<String>> testRunner) {
    List<String> projectAndDeployment = asList(
        AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_PROJECT_NAME,
        AZURE_TEXT_ANALYTICS_CUSTOM_MULTI_CLASSIFICATION_DEPLOYMENT_NAME);
    testRunner.accept(CUSTOM_MULTI_CLASSIFICATION, projectAndDeployment);
}
/**
 * Runner building a single extractive-summary action with the given sentence
 * cap and ordering.
 */
void extractiveSummaryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner,
    Integer maxSentenceCount, ExtractiveSummarySentencesOrder extractiveSummarySentencesOrder) {
    ExtractiveSummaryAction action = new ExtractiveSummaryAction()
        .setMaxSentenceCount(maxSentenceCount)
        .setOrderBy(extractiveSummarySentencesOrder);
    testRunner.accept(SUMMARY_INPUTS, new TextAnalyticsActions().setExtractiveSummaryActions(action));
}
/** Runner pairing the summary inputs with extractive-summary options. */
void extractiveSummaryRunner(BiConsumer<List<String>, ExtractiveSummaryOptions> testRunner,
    Integer maxSentenceCount, ExtractiveSummarySentencesOrder extractiveSummarySentencesOrder) {
    ExtractiveSummaryOptions options = new ExtractiveSummaryOptions();
    options.setMaxSentenceCount(maxSentenceCount);
    options.setOrderBy(extractiveSummarySentencesOrder);
    testRunner.accept(SUMMARY_INPUTS, options);
}
/** Runner like {@code extractiveSummaryRunner} but with TextDocumentInput documents. */
void extractiveSummaryMaxOverloadRunner(BiConsumer<List<TextDocumentInput>, ExtractiveSummaryOptions> testRunner,
    Integer maxSentenceCount, ExtractiveSummarySentencesOrder extractiveSummarySentencesOrder) {
    List<TextDocumentInput> documents = TestUtils.getTextDocumentInputs(SUMMARY_INPUTS);
    ExtractiveSummaryOptions options = new ExtractiveSummaryOptions()
        .setMaxSentenceCount(maxSentenceCount)
        .setOrderBy(extractiveSummarySentencesOrder);
    testRunner.accept(documents, options);
}
/** Runner building a single abstractive-summary action with the sentence count. */
void abstractiveSummaryActionRunner(BiConsumer<List<String>, TextAnalyticsActions> testRunner,
    Integer sentenceCount) {
    AbstractiveSummaryAction action = new AbstractiveSummaryAction().setSentenceCount(sentenceCount);
    testRunner.accept(SUMMARY_INPUTS, new TextAnalyticsActions().setAbstractiveSummaryActions(action));
}
/** Runner pairing the summary inputs with abstractive-summary options. */
void abstractiveSummaryRunner(BiConsumer<List<String>, AbstractiveSummaryOptions> testRunner,
    Integer sentenceCount) {
    AbstractiveSummaryOptions options = new AbstractiveSummaryOptions().setSentenceCount(sentenceCount);
    testRunner.accept(SUMMARY_INPUTS, options);
}
/** Runner like {@code abstractiveSummaryRunner} but with TextDocumentInput documents. */
void abstractiveSummaryMaxOverloadRunner(BiConsumer<List<TextDocumentInput>, AbstractiveSummaryOptions> testRunner,
    Integer sentenceCount) {
    AbstractiveSummaryOptions options = new AbstractiveSummaryOptions().setSentenceCount(sentenceCount);
    testRunner.accept(TestUtils.getTextDocumentInputs(SUMMARY_INPUTS), options);
}
String getEndpoint(boolean isStaticResource) {
return interceptorManager.isPlaybackMode() ? "https:
: isStaticResource ? AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_ENDPOINT : AZURE_TEXT_ANALYTICS_ENDPOINT;
}
/**
 * Resolves the API key for the current test mode: a fake key in playback,
 * otherwise the static-resource or default live key.
 */
String getApiKey(boolean isStaticSource) {
    if (interceptorManager.isPlaybackMode()) {
        return FAKE_API_KEY;
    }
    return isStaticSource ? AZURE_TEXT_ANALYTICS_CUSTOM_TEXT_API_KEY : AZURE_TEXT_ANALYTICS_API_KEY;
}
/**
 * Builds a client builder wired for the given service version and resource,
 * attaching the record policy when running in record mode.
 */
TextAnalyticsClientBuilder getTextAnalyticsClientBuilder(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
    String endpoint = getEndpoint(isStaticResource);
    AzureKeyCredential credential = new AzureKeyCredential(getApiKey(isStaticResource));
    TextAnalyticsClientBuilder builder = new TextAnalyticsClientBuilder()
        .endpoint(endpoint)
        .credential(credential)
        .httpClient(httpClient)
        .serviceVersion(serviceVersion);
    if (interceptorManager.isRecordMode()) {
        builder.addPolicy(interceptorManager.getRecordPolicy());
    }
    return builder;
}
/** Verifies status code and payload of a detect-language response. */
static void validateDetectLanguageResultCollectionWithResponse(boolean showStatistics,
    DetectLanguageResultCollection expected, int expectedStatusCode,
    Response<DetectLanguageResultCollection> response) {
    assertNotNull(response);
    assertEquals(expectedStatusCode, response.getStatusCode());
    DetectLanguageResultCollection actualCollection = response.getValue();
    validateDetectLanguageResultCollection(showStatistics, expected, actualCollection);
}
/** Verifies each detect-language document result via its primary language. */
static void validateDetectLanguageResultCollection(boolean showStatistics,
    DetectLanguageResultCollection expected, DetectLanguageResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (expectedItem, actualItem) -> validatePrimaryLanguage(
            expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage()));
}
/** Verifies status code and payload of a recognize-entities response. */
static void validateCategorizedEntitiesResultCollectionWithResponse(boolean showStatistics,
    RecognizeEntitiesResultCollection expected, int expectedStatusCode,
    Response<RecognizeEntitiesResultCollection> response) {
    assertNotNull(response);
    assertEquals(expectedStatusCode, response.getStatusCode());
    RecognizeEntitiesResultCollection actualCollection = response.getValue();
    validateCategorizedEntitiesResultCollection(showStatistics, expected, actualCollection);
}
/** Verifies each recognize-entities document result's entity list. */
static void validateCategorizedEntitiesResultCollection(boolean showStatistics,
    RecognizeEntitiesResultCollection expected, RecognizeEntitiesResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (expectedItem, actualItem) -> validateCategorizedEntities(
            actualItem.getEntities().stream().collect(Collectors.toList())));
}
/** Verifies status code and payload of a recognize-PII-entities response. */
static void validatePiiEntitiesResultCollectionWithResponse(boolean showStatistics,
    RecognizePiiEntitiesResultCollection expected, int expectedStatusCode,
    Response<RecognizePiiEntitiesResultCollection> response) {
    assertNotNull(response);
    assertEquals(expectedStatusCode, response.getStatusCode());
    RecognizePiiEntitiesResultCollection actualCollection = response.getValue();
    validatePiiEntitiesResultCollection(showStatistics, expected, actualCollection);
}
/**
 * Verifies each PII document result: its redacted text, then its entity list.
 */
static void validatePiiEntitiesResultCollection(boolean showStatistics,
    RecognizePiiEntitiesResultCollection expected, RecognizePiiEntitiesResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> {
        PiiEntityCollection expectedEntities = expectedItem.getEntities();
        PiiEntityCollection actualEntities = actualItem.getEntities();
        assertEquals(expectedEntities.getRedactedText(), actualEntities.getRedactedText());
        validatePiiEntities(expectedEntities.stream().collect(Collectors.toList()),
            actualEntities.stream().collect(Collectors.toList()));
    });
}
/** Verifies status code and payload of a recognize-linked-entities response. */
static void validateLinkedEntitiesResultCollectionWithResponse(boolean showStatistics,
    RecognizeLinkedEntitiesResultCollection expected, int expectedStatusCode,
    Response<RecognizeLinkedEntitiesResultCollection> response) {
    assertNotNull(response);
    assertEquals(expectedStatusCode, response.getStatusCode());
    RecognizeLinkedEntitiesResultCollection actualCollection = response.getValue();
    validateLinkedEntitiesResultCollection(showStatistics, expected, actualCollection);
}
/** Verifies each linked-entities document result's entity list. */
static void validateLinkedEntitiesResultCollection(boolean showStatistics,
    RecognizeLinkedEntitiesResultCollection expected, RecognizeLinkedEntitiesResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (expectedItem, actualItem) -> validateLinkedEntities(
            expectedItem.getEntities().stream().collect(Collectors.toList()),
            actualItem.getEntities().stream().collect(Collectors.toList())));
}
/** Verifies status code and payload of an extract-key-phrases response. */
static void validateExtractKeyPhrasesResultCollectionWithResponse(boolean showStatistics,
    ExtractKeyPhrasesResultCollection expected, int expectedStatusCode,
    Response<ExtractKeyPhrasesResultCollection> response) {
    assertNotNull(response);
    assertEquals(expectedStatusCode, response.getStatusCode());
    ExtractKeyPhrasesResultCollection actualCollection = response.getValue();
    validateExtractKeyPhrasesResultCollection(showStatistics, expected, actualCollection);
}
/**
 * Verifies a classify-document result: for expected-error documents the error
 * flag must be set; otherwise the classifications are compared pairwise.
 */
static void validateClassifyDocumentResult(ClassifyDocumentResult expect, ClassifyDocumentResult actual) {
    assertEquals(expect.getId(), actual.getId());
    if (expect.isError()) {
        // Previously asserted assertNotNull(actual.isError()) — a boxed boolean
        // that is never null, so the check always passed. Assert the flag itself.
        assertTrue(actual.isError());
    } else {
        assertNull(actual.getError());
        List<ClassificationCategory> actualClassifications =
            actual.getClassifications().stream().collect(Collectors.toList());
        List<ClassificationCategory> expectClassifications =
            expect.getClassifications().stream().collect(Collectors.toList());
        assertEquals(expectClassifications.size(), actualClassifications.size());
        for (int i = 0; i < expectClassifications.size(); i++) {
            validateClassificationCategory(expectClassifications.get(i), actualClassifications.get(i));
        }
    }
}
/**
 * Verifies one classification pair: categories must match; the confidence
 * score is model-dependent, so only its presence is checked.
 */
static void validateClassificationCategory(ClassificationCategory expect, ClassificationCategory actual) {
    assertNotNull(actual.getConfidenceScore());
    assertEquals(expect.getCategory(), actual.getCategory());
}
/** Verifies each extract-key-phrases document result's phrase list. */
static void validateExtractKeyPhrasesResultCollection(boolean showStatistics,
    ExtractKeyPhrasesResultCollection expected, ExtractKeyPhrasesResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (expectedItem, actualItem) -> validateKeyPhrases(
            expectedItem.getKeyPhrases().stream().collect(Collectors.toList()),
            actualItem.getKeyPhrases().stream().collect(Collectors.toList())));
}
/** Verifies status code and payload of an analyze-sentiment response. */
static void validateAnalyzeSentimentResultCollectionWithResponse(boolean showStatistics,
    boolean includeOpinionMining, AnalyzeSentimentResultCollection expected,
    int expectedStatusCode, Response<AnalyzeSentimentResultCollection> response) {
    assertNotNull(response);
    assertEquals(expectedStatusCode, response.getStatusCode());
    AnalyzeSentimentResultCollection actualCollection = response.getValue();
    validateAnalyzeSentimentResultCollection(showStatistics, includeOpinionMining, expected, actualCollection);
}
/** Verifies each sentiment document result's document-level sentiment. */
static void validateAnalyzeSentimentResultCollection(boolean showStatistics, boolean includeOpinionMining,
    AnalyzeSentimentResultCollection expected, AnalyzeSentimentResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (expectedItem, actualItem) -> validateDocumentSentiment(includeOpinionMining,
            expectedItem.getDocumentSentiment(), actualItem.getDocumentSentiment()));
}
/** Verifies each healthcare document result (entities and relations). */
static void validateAnalyzeHealthcareEntitiesResultCollection(boolean showStatistics,
    AnalyzeHealthcareEntitiesResultCollection expected, AnalyzeHealthcareEntitiesResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (e, a) -> validateHealthcareEntityDocumentResult(e, a));
}
/**
 * Helper method to validate a single detected language.
 *
 * @param expectedLanguage detectedLanguage returned by the service.
 * @param actualLanguage detectedLanguage returned by the API.
 */
static void validatePrimaryLanguage(DetectedLanguage expectedLanguage, DetectedLanguage actualLanguage) {
    // Confidence scores drift with model updates, so only presence is checked.
    assertNotNull(actualLanguage.getConfidenceScore());
    assertEquals(expectedLanguage.getIso6391Name(), actualLanguage.getIso6391Name());
    assertEquals(expectedLanguage.getName(), actualLanguage.getName());
}
/**
 * Helper method to validate a single categorized entity. Only presence is
 * checked because entity values vary with the service model.
 *
 * @param actualCategorizedEntity CategorizedEntity returned by the API.
 */
static void validateCategorizedEntity(CategorizedEntity actualCategorizedEntity) {
    assertNotNull(actualCategorizedEntity.getCategory());
    assertNotNull(actualCategorizedEntity.getConfidenceScore());
    assertNotNull(actualCategorizedEntity.getOffset());
    assertNotNull(actualCategorizedEntity.getText());
}
/**
 * Helper method to validate a single Personally Identifiable Information entity.
 *
 * @param expectedPiiEntity PiiEntity returned by the service.
 * @param actualPiiEntity PiiEntity returned by the API.
 */
static void validatePiiEntity(PiiEntity expectedPiiEntity, PiiEntity actualPiiEntity) {
    assertEquals(expectedPiiEntity.getText(), actualPiiEntity.getText());
    assertEquals(expectedPiiEntity.getCategory(), actualPiiEntity.getCategory());
    assertEquals(expectedPiiEntity.getSubcategory(), actualPiiEntity.getSubcategory());
    assertEquals(expectedPiiEntity.getOffset(), actualPiiEntity.getOffset());
    // Confidence score is model-dependent; only presence is checked.
    assertNotNull(actualPiiEntity.getConfidenceScore());
}
/**
 * Helper method to validate a single linked entity. URLs are scrubbed from
 * recorded sessions, so the URL comparison depends on the current test mode.
 *
 * @param expectedLinkedEntity LinkedEntity returned by the service.
 * @param actualLinkedEntity LinkedEntity returned by the API.
 */
static void validateLinkedEntity(LinkedEntity expectedLinkedEntity, LinkedEntity actualLinkedEntity) {
    assertEquals(expectedLinkedEntity.getName(), actualLinkedEntity.getName());
    assertEquals(expectedLinkedEntity.getDataSource(), actualLinkedEntity.getDataSource());
    assertEquals(expectedLinkedEntity.getLanguage(), actualLinkedEntity.getLanguage());
    // In playback mode the sanitizer has replaced the URL with the redacted marker.
    String expectedUrl = interceptorManagerTestBase.isPlaybackMode()
        ? REDACTED : expectedLinkedEntity.getUrl();
    assertEquals(expectedUrl, actualLinkedEntity.getUrl());
    assertEquals(expectedLinkedEntity.getDataSourceEntityId(), actualLinkedEntity.getDataSourceEntityId());
    validateLinkedEntityMatches(expectedLinkedEntity.getMatches().stream().collect(Collectors.toList()),
        actualLinkedEntity.getMatches().stream().collect(Collectors.toList()));
}
/**
 * Helper method to validate key phrases pairwise after sorting. Copies are
 * sorted so the caller-supplied lists are left unmodified (the original sorted
 * the arguments in place, mutating caller-owned lists).
 *
 * @param expectedKeyPhrases key phrases returned by the service.
 * @param actualKeyPhrases key phrases returned by the API.
 */
static void validateKeyPhrases(List<String> expectedKeyPhrases, List<String> actualKeyPhrases) {
    assertEquals(expectedKeyPhrases.size(), actualKeyPhrases.size());
    List<String> expectedSorted = new ArrayList<>(expectedKeyPhrases);
    List<String> actualSorted = new ArrayList<>(actualKeyPhrases);
    Collections.sort(expectedSorted);
    Collections.sort(actualSorted);
    for (int i = 0; i < expectedSorted.size(); i++) {
        assertEquals(expectedSorted.get(i), actualSorted.get(i));
    }
}
/**
 * Helper method to validate the list of categorized entities.
 *
 * @param actualCategorizedEntityList categorizedEntities returned by the API.
 */
static void validateCategorizedEntities(List<CategorizedEntity> actualCategorizedEntityList) {
    for (CategorizedEntity entity : actualCategorizedEntityList) {
        validateCategorizedEntity(entity);
    }
}
/**
 * Helper method to validate PII entities pairwise after ordering both sides by
 * text. Sorted copies are used so the caller-supplied lists are not mutated
 * (the original sorted the arguments in place).
 *
 * @param expectedPiiEntityList piiEntities returned by the service.
 * @param actualPiiEntityList piiEntities returned by the API.
 */
static void validatePiiEntities(List<PiiEntity> expectedPiiEntityList, List<PiiEntity> actualPiiEntityList) {
    assertEquals(expectedPiiEntityList.size(), actualPiiEntityList.size());
    List<PiiEntity> expectedSorted = new ArrayList<>(expectedPiiEntityList);
    List<PiiEntity> actualSorted = new ArrayList<>(actualPiiEntityList);
    expectedSorted.sort(Comparator.comparing(PiiEntity::getText));
    actualSorted.sort(Comparator.comparing(PiiEntity::getText));
    for (int i = 0; i < expectedSorted.size(); i++) {
        validatePiiEntity(expectedSorted.get(i), actualSorted.get(i));
    }
}
/**
 * Helper method to validate linked entities pairwise after ordering both sides
 * by name. Sorted copies are used so the caller-supplied lists are not mutated
 * (the original sorted the arguments in place).
 *
 * @param expectedLinkedEntityList linkedEntities returned by the service.
 * @param actualLinkedEntityList linkedEntities returned by the API.
 */
static void validateLinkedEntities(List<LinkedEntity> expectedLinkedEntityList,
    List<LinkedEntity> actualLinkedEntityList) {
    assertEquals(expectedLinkedEntityList.size(), actualLinkedEntityList.size());
    List<LinkedEntity> expectedSorted = new ArrayList<>(expectedLinkedEntityList);
    List<LinkedEntity> actualSorted = new ArrayList<>(actualLinkedEntityList);
    expectedSorted.sort(Comparator.comparing(LinkedEntity::getName));
    actualSorted.sort(Comparator.comparing(LinkedEntity::getName));
    for (int i = 0; i < expectedSorted.size(); i++) {
        validateLinkedEntity(expectedSorted.get(i), actualSorted.get(i));
    }
}
/**
 * Helper method to validate the list of sentence sentiment. Score numbers are
 * not validated because they change with background model computation.
 *
 * @param expectedSentimentList a list of analyzed sentence sentiment returned by the service.
 * @param actualSentimentList a list of analyzed sentence sentiment returned by the API.
 */
static void validateSentenceSentimentList(boolean includeOpinionMining, List<SentenceSentiment> expectedSentimentList,
    List<SentenceSentiment> actualSentimentList) {
    int size = expectedSentimentList.size();
    assertEquals(size, actualSentimentList.size());
    for (int index = 0; index < size; index++) {
        validateSentenceSentiment(includeOpinionMining, expectedSentimentList.get(index),
            actualSentimentList.get(index));
    }
}
/**
 * Helper method to validate one pair of analyzed sentence sentiments. Score
 * numbers are not validated because they change with background model
 * computation.
 *
 * @param expectedSentiment analyzed sentence sentiment returned by the service.
 * @param actualSentiment analyzed sentence sentiment returned by the API.
 */
static void validateSentenceSentiment(boolean includeOpinionMining, SentenceSentiment expectedSentiment,
    SentenceSentiment actualSentiment) {
    assertEquals(expectedSentiment.getSentiment(), actualSentiment.getSentiment());
    assertEquals(expectedSentiment.getText(), actualSentiment.getText());
    assertEquals(expectedSentiment.getOffset(), actualSentiment.getOffset());
    assertEquals(expectedSentiment.getLength(), actualSentiment.getLength());
    if (!includeOpinionMining) {
        // Opinions are only populated when opinion mining was requested.
        assertNull(actualSentiment.getOpinions());
        return;
    }
    validateSentenceOpinions(expectedSentiment.getOpinions().stream().collect(Collectors.toList()),
        actualSentiment.getOpinions().stream().collect(Collectors.toList()));
}
/**
 * Helper method to validate sentence's opinions.
 *
 * @param expectedSentenceOpinions a list of sentence opinions returned by the service.
 * @param actualSentenceOpinions a list of sentence opinions returned by the API.
 */
static void validateSentenceOpinions(List<SentenceOpinion> expectedSentenceOpinions,
    List<SentenceOpinion> actualSentenceOpinions) {
    assertEquals(expectedSentenceOpinions.size(), actualSentenceOpinions.size());
    for (int index = 0; index < actualSentenceOpinions.size(); index++) {
        SentenceOpinion expectedOpinion = expectedSentenceOpinions.get(index);
        SentenceOpinion actualOpinion = actualSentenceOpinions.get(index);
        validateTargetSentiment(expectedOpinion.getTarget(), actualOpinion.getTarget());
        validateAssessmentList(expectedOpinion.getAssessments().stream().collect(Collectors.toList()),
            actualOpinion.getAssessments().stream().collect(Collectors.toList()));
    }
}
/**
 * Helper method to validate target sentiment.
 *
 * @param expected An expected target sentiment.
 * @param actual An actual target sentiment.
 */
static void validateTargetSentiment(TargetSentiment expected, TargetSentiment actual) {
    assertEquals(expected.getOffset(), actual.getOffset());
    assertEquals(expected.getText(), actual.getText());
    assertEquals(expected.getSentiment(), actual.getSentiment());
}
/**
 * Helper method to validate a list of {@link AssessmentSentiment}.
 *
 * @param expected A list of expected assessment sentiments.
 * @param actual A list of actual assessment sentiments.
 */
static void validateAssessmentList(List<AssessmentSentiment> expected, List<AssessmentSentiment> actual) {
    int count = expected.size();
    assertEquals(count, actual.size());
    for (int index = 0; index < count; index++) {
        validateAssessmentSentiment(expected.get(index), actual.get(index));
    }
}
/**
 * Helper method to validate assessment sentiment.
 *
 * @param expect An expected assessment sentiment.
 * @param actual An actual assessment sentiment.
 */
static void validateAssessmentSentiment(AssessmentSentiment expect, AssessmentSentiment actual) {
    assertEquals(expect.getOffset(), actual.getOffset());
    assertEquals(expect.isNegated(), actual.isNegated());
    assertEquals(expect.getText(), actual.getText());
    assertEquals(expect.getSentiment(), actual.getSentiment());
}
/**
 * Helper method to validate one pair of analyzed document sentiments. Score
 * numbers are not validated because they change with background model
 * computation.
 *
 * @param expectedSentiment analyzed document sentiment returned by the service.
 * @param actualSentiment analyzed document sentiment returned by the API.
 */
static void validateDocumentSentiment(boolean includeOpinionMining, DocumentSentiment expectedSentiment,
    DocumentSentiment actualSentiment) {
    assertEquals(expectedSentiment.getSentiment(), actualSentiment.getSentiment());
    List<SentenceSentiment> expectedSentences =
        expectedSentiment.getSentences().stream().collect(Collectors.toList());
    List<SentenceSentiment> actualSentences =
        actualSentiment.getSentences().stream().collect(Collectors.toList());
    validateSentenceSentimentList(includeOpinionMining, expectedSentences, actualSentences);
}
/** Verifies a label-classification document result, or its error when flagged. */
static void validateLabelClassificationResult(ClassifyDocumentResult documentResult) {
    assertNotNull(documentResult.getId());
    if (!documentResult.isError()) {
        assertNull(documentResult.getError());
        for (ClassificationCategory classification : documentResult.getClassifications()) {
            validateDocumentClassification(classification);
        }
        return;
    }
    assertNotNull(documentResult.getError());
}
/** A classification must always carry a category and a confidence score. */
static void validateDocumentClassification(ClassificationCategory classificationCategory) {
    assertNotNull(classificationCategory.getConfidenceScore());
    assertNotNull(classificationCategory.getCategory());
}
/**
 * Verifies an entity assertion; identical references (including both null)
 * are accepted without a field-by-field comparison.
 */
static void validateEntityAssertion(HealthcareEntityAssertion expected, HealthcareEntityAssertion actual) {
    if (expected == actual) {
        return;
    }
    assertEquals(expected.getConditionality(), actual.getConditionality());
    assertEquals(expected.getAssociation(), actual.getAssociation());
    assertEquals(expected.getCertainty(), actual.getCertainty());
}
/**
 * Verifies that expected and actual data-source streams are either the same
 * reference or both present. Contents are not compared here.
 */
static void validateEntityDataSourceList(IterableStream<EntityDataSource> expected,
    IterableStream<EntityDataSource> actual) {
    if (expected == actual) {
        return;
    }
    // Previously "assertTrue(false)" on a single-null mismatch; assert both
    // sides explicitly so the failure message names the null side.
    assertNotNull(expected);
    assertNotNull(actual);
}
/** Verifies one healthcare document result: its relations, then its entities. */
static void validateHealthcareEntityDocumentResult(AnalyzeHealthcareEntitiesResult expected,
    AnalyzeHealthcareEntitiesResult actual) {
    List<HealthcareEntityRelation> expectedRelations =
        expected.getEntityRelations().stream().collect(Collectors.toList());
    List<HealthcareEntityRelation> actualRelations =
        actual.getEntityRelations().stream().collect(Collectors.toList());
    validateHealthcareEntityRelations(expectedRelations, actualRelations);
    validateHealthcareEntities(expected.getEntities().stream().collect(Collectors.toList()),
        actual.getEntities().stream().collect(Collectors.toList()));
}
/** Verifies healthcare entity relations pairwise; sizes must match. */
static void validateHealthcareEntityRelations(List<HealthcareEntityRelation> expected,
    List<HealthcareEntityRelation> actual) {
    assertEquals(expected.size(), actual.size());
    for (int index = 0; index < expected.size(); index++) {
        validateHealthcareEntityRelation(expected.get(index), actual.get(index));
    }
}
/**
 * Verifies one healthcare entity relation: its type, confidence presence, and
 * every role pairwise.
 */
static void validateHealthcareEntityRelation(HealthcareEntityRelation expected, HealthcareEntityRelation actual) {
    final List<HealthcareEntityRelationRole> expectedRoles =
        expected.getRoles().stream().collect(Collectors.toList());
    final List<HealthcareEntityRelationRole> actualRoles =
        actual.getRoles().stream().collect(Collectors.toList());
    assertEquals(expected.getRelationType(), actual.getRelationType());
    assertNotNull(actual.getConfidenceScore());
    // Guard against a shorter "actual" role list, which previously surfaced as
    // an IndexOutOfBoundsException instead of a clean assertion failure.
    assertEquals(expectedRoles.size(), actualRoles.size());
    for (int i = 0; i < expectedRoles.size(); i++) {
        validateHealthcareEntityRelationRole(expectedRoles.get(i), actualRoles.get(i));
    }
}
/** Verifies a relation role's name and the entity it refers to. */
static void validateHealthcareEntityRelationRole(HealthcareEntityRelationRole expected,
    HealthcareEntityRelationRole actual) {
    assertEquals(expected.getName(), actual.getName());
    HealthcareEntity expectedEntity = expected.getEntity();
    HealthcareEntity actualEntity = actual.getEntity();
    validateHealthcareEntity(expectedEntity, actualEntity);
}
/**
 * Verifies healthcare entity lists pairwise after ordering both sides by text.
 * Sorted copies are used so the caller-supplied lists are not mutated (the
 * original sorted the arguments in place).
 */
static void validateHealthcareEntities(List<HealthcareEntity> expected, List<HealthcareEntity> actual) {
    assertEquals(expected.size(), actual.size());
    List<HealthcareEntity> expectedSorted = new ArrayList<>(expected);
    List<HealthcareEntity> actualSorted = new ArrayList<>(actual);
    expectedSorted.sort(Comparator.comparing(HealthcareEntity::getText));
    actualSorted.sort(Comparator.comparing(HealthcareEntity::getText));
    for (int i = 0; i < expectedSorted.size(); i++) {
        validateHealthcareEntity(expectedSorted.get(i), actualSorted.get(i));
    }
}
/** Verifies healthcare result collections pairwise; sizes must match. */
static void validateAnalyzeHealthcareEntitiesResultCollectionList(boolean showStatistics,
    List<AnalyzeHealthcareEntitiesResultCollection> expected,
    List<AnalyzeHealthcareEntitiesResultCollection> actual) {
    assertEquals(expected.size(), actual.size());
    for (int index = 0; index < actual.size(); index++) {
        validateAnalyzeHealthcareEntitiesResultCollection(showStatistics, expected.get(index), actual.get(index));
    }
}
/** Verifies batch-action results pairwise; sizes must match. */
static void validateAnalyzeBatchActionsResultList(boolean showStatistics, boolean includeOpinionMining,
    List<AnalyzeActionsResult> expected, List<AnalyzeActionsResult> actual) {
    assertEquals(expected.size(), actual.size());
    for (int index = 0; index < actual.size(); index++) {
        validateAnalyzeActionsResult(showStatistics, includeOpinionMining, expected.get(index), actual.get(index));
    }
}
/** Verifies one batch-action result by checking every action kind's results. */
static void validateAnalyzeActionsResult(boolean showStatistics, boolean includeOpinionMining,
    AnalyzeActionsResult expected, AnalyzeActionsResult actual) {
    List<RecognizeEntitiesActionResult> expectedEntities =
        expected.getRecognizeEntitiesResults().stream().collect(Collectors.toList());
    List<RecognizeEntitiesActionResult> actualEntities =
        actual.getRecognizeEntitiesResults().stream().collect(Collectors.toList());
    validateRecognizeEntitiesActionResults(showStatistics, expectedEntities, actualEntities);
    List<RecognizeLinkedEntitiesActionResult> expectedLinked =
        expected.getRecognizeLinkedEntitiesResults().stream().collect(Collectors.toList());
    List<RecognizeLinkedEntitiesActionResult> actualLinked =
        actual.getRecognizeLinkedEntitiesResults().stream().collect(Collectors.toList());
    validateRecognizeLinkedEntitiesActionResults(showStatistics, expectedLinked, actualLinked);
    List<RecognizePiiEntitiesActionResult> expectedPii =
        expected.getRecognizePiiEntitiesResults().stream().collect(Collectors.toList());
    List<RecognizePiiEntitiesActionResult> actualPii =
        actual.getRecognizePiiEntitiesResults().stream().collect(Collectors.toList());
    validateRecognizePiiEntitiesActionResults(showStatistics, expectedPii, actualPii);
    List<AnalyzeHealthcareEntitiesActionResult> expectedHealthcare =
        expected.getAnalyzeHealthcareEntitiesResults().stream().collect(Collectors.toList());
    List<AnalyzeHealthcareEntitiesActionResult> actualHealthcare =
        actual.getAnalyzeHealthcareEntitiesResults().stream().collect(Collectors.toList());
    validateAnalyzeHealthcareEntitiesActionResults(showStatistics, expectedHealthcare, actualHealthcare);
    List<ExtractKeyPhrasesActionResult> expectedKeyPhrases =
        expected.getExtractKeyPhrasesResults().stream().collect(Collectors.toList());
    List<ExtractKeyPhrasesActionResult> actualKeyPhrases =
        actual.getExtractKeyPhrasesResults().stream().collect(Collectors.toList());
    validateExtractKeyPhrasesActionResults(showStatistics, expectedKeyPhrases, actualKeyPhrases);
    List<AnalyzeSentimentActionResult> expectedSentiment =
        expected.getAnalyzeSentimentResults().stream().collect(Collectors.toList());
    List<AnalyzeSentimentActionResult> actualSentiment =
        actual.getAnalyzeSentimentResults().stream().collect(Collectors.toList());
    validateAnalyzeSentimentActionResults(showStatistics, includeOpinionMining, expectedSentiment, actualSentiment);
    validateExtractiveSummaryActionResults(showStatistics,
        expected.getExtractiveSummaryResults().stream().collect(Collectors.toList()),
        actual.getExtractiveSummaryResults().stream().collect(Collectors.toList()));
    validateAbstractiveSummaryActionResults(showStatistics,
        expected.getAbstractiveSummaryResults().stream().collect(Collectors.toList()),
        actual.getAbstractiveSummaryResults().stream().collect(Collectors.toList()));
}
/**
 * Verifies recognize-entities action results pairwise. Sizes must match —
 * every sibling action-result validator in this class asserts the sizes, and
 * without it a shorter "actual" list would throw IndexOutOfBoundsException
 * while a longer one would be silently accepted.
 */
static void validateRecognizeEntitiesActionResults(boolean showStatistics,
    List<RecognizeEntitiesActionResult> expected, List<RecognizeEntitiesActionResult> actual) {
    assertEquals(expected.size(), actual.size());
    for (int i = 0; i < actual.size(); i++) {
        validateRecognizeEntitiesActionResult(showStatistics, expected.get(i), actual.get(i));
    }
}
/** Verifies linked-entities action results pairwise; sizes must match. */
static void validateRecognizeLinkedEntitiesActionResults(boolean showStatistics,
    List<RecognizeLinkedEntitiesActionResult> expected, List<RecognizeLinkedEntitiesActionResult> actual) {
    assertEquals(expected.size(), actual.size());
    for (int index = 0; index < actual.size(); index++) {
        validateRecognizeLinkedEntitiesActionResult(showStatistics, expected.get(index), actual.get(index));
    }
}
/** Asserts pairwise equality of the expected and actual PII-entities action results. */
static void validateRecognizePiiEntitiesActionResults(boolean showStatistics,
    List<RecognizePiiEntitiesActionResult> expected, List<RecognizePiiEntitiesActionResult> actual) {
    final int count = actual.size();
    assertEquals(expected.size(), count);
    for (int index = 0; index < count; index++) {
        validateRecognizePiiEntitiesActionResult(showStatistics, expected.get(index), actual.get(index));
    }
}
/** Asserts pairwise equality of the expected and actual healthcare-entities action results. */
static void validateAnalyzeHealthcareEntitiesActionResults(boolean showStatistics,
    List<AnalyzeHealthcareEntitiesActionResult> expected, List<AnalyzeHealthcareEntitiesActionResult> actual) {
    assertEquals(expected.size(), actual.size());
    int index = 0;
    while (index < actual.size()) {
        validateAnalyzeHealthcareEntitiesActionResult(showStatistics, expected.get(index), actual.get(index));
        index++;
    }
}
/** Asserts pairwise equality of the expected and actual key-phrase-extraction action results. */
static void validateExtractKeyPhrasesActionResults(boolean showStatistics,
    List<ExtractKeyPhrasesActionResult> expected, List<ExtractKeyPhrasesActionResult> actual) {
    final int count = actual.size();
    assertEquals(expected.size(), count);
    for (int index = 0; index < count; index++) {
        validateExtractKeyPhrasesActionResult(showStatistics, expected.get(index), actual.get(index));
    }
}
/** Asserts pairwise equality of the expected and actual sentiment-analysis action results. */
static void validateAnalyzeSentimentActionResults(boolean showStatistics, boolean includeOpinionMining,
    List<AnalyzeSentimentActionResult> expected, List<AnalyzeSentimentActionResult> actual) {
    assertEquals(expected.size(), actual.size());
    int index = 0;
    for (AnalyzeSentimentActionResult actualResult : actual) {
        validateAnalyzeSentimentActionResult(showStatistics, includeOpinionMining, expected.get(index),
            actualResult);
        index++;
    }
}
/**
 * Validates a single recognize-entities action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateRecognizeEntitiesActionResult(boolean showStatistics,
    RecognizeEntitiesActionResult expected, RecognizeEntitiesActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateCategorizedEntitiesResultCollection(showStatistics, expected.getDocumentsResults(),
            actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Validates a single linked-entities action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateRecognizeLinkedEntitiesActionResult(boolean showStatistics,
    RecognizeLinkedEntitiesActionResult expected, RecognizeLinkedEntitiesActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateLinkedEntitiesResultCollection(showStatistics, expected.getDocumentsResults(),
            actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Validates a single PII-entities action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateRecognizePiiEntitiesActionResult(boolean showStatistics,
    RecognizePiiEntitiesActionResult expected, RecognizePiiEntitiesActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validatePiiEntitiesResultCollection(showStatistics, expected.getDocumentsResults(),
            actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Validates a single healthcare-entities action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateAnalyzeHealthcareEntitiesActionResult(boolean showStatistics,
    AnalyzeHealthcareEntitiesActionResult expected, AnalyzeHealthcareEntitiesActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateAnalyzeHealthcareEntitiesResultCollection(showStatistics,
            expected.getDocumentsResults(), actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Validates a single key-phrase-extraction action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateExtractKeyPhrasesActionResult(boolean showStatistics,
    ExtractKeyPhrasesActionResult expected, ExtractKeyPhrasesActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateExtractKeyPhrasesResultCollection(showStatistics, expected.getDocumentsResults(),
            actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Validates a single sentiment-analysis action result: on error, compares the error documents;
 * otherwise compares the document result collections (optionally including opinion mining).
 */
static void validateAnalyzeSentimentActionResult(boolean showStatistics, boolean includeOpinionMining,
    AnalyzeSentimentActionResult expected, AnalyzeSentimentActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateAnalyzeSentimentResultCollection(showStatistics, includeOpinionMining,
            expected.getDocumentsResults(), actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Helper method to verify {@link TextAnalyticsResult documents} returned in a batch request.
 */
// Generic batch verifier: results are keyed by document id (order-independent comparison),
// batch-level statistics are checked (or asserted absent) per concrete collection type, and
// per-document checks are delegated to 'additionalAssertions'.
static <T extends TextAnalyticsResult, H extends IterableStream<T>> void validateTextAnalyticsResult(
boolean showStatistics, H expectedResults, H actualResults, BiConsumer<T, T> additionalAssertions) {
// Index both sides by document id so document ordering does not affect the comparison.
final Map<String, T> expected = expectedResults.stream().collect(
Collectors.toMap(TextAnalyticsResult::getId, r -> r));
final Map<String, T> actual = actualResults.stream().collect(
Collectors.toMap(TextAnalyticsResult::getId, r -> r));
assertEquals(expected.size(), actual.size());
if (showStatistics) {
// The getStatistics() accessors are declared on the concrete collection types rather than a
// shared interface, hence the instanceof dispatch below.
if (expectedResults instanceof AnalyzeHealthcareEntitiesResultCollection) {
validateBatchStatistics(((AnalyzeHealthcareEntitiesResultCollection) expectedResults).getStatistics(),
((AnalyzeHealthcareEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof AnalyzeSentimentResultCollection) {
validateBatchStatistics(((AnalyzeSentimentResultCollection) expectedResults).getStatistics(),
((AnalyzeSentimentResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof ClassifyDocumentResultCollection) {
validateBatchStatistics(((ClassifyDocumentResultCollection) expectedResults).getStatistics(),
((ClassifyDocumentResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof DetectLanguageResultCollection) {
validateBatchStatistics(((DetectLanguageResultCollection) expectedResults).getStatistics(),
((DetectLanguageResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof ExtractKeyPhrasesResultCollection) {
validateBatchStatistics(((ExtractKeyPhrasesResultCollection) expectedResults).getStatistics(),
((ExtractKeyPhrasesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof ExtractiveSummaryResultCollection) {
validateBatchStatistics(((ExtractiveSummaryResultCollection) expectedResults).getStatistics(),
((ExtractiveSummaryResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizeCustomEntitiesResultCollection) {
validateBatchStatistics(((RecognizeCustomEntitiesResultCollection) expectedResults).getStatistics(),
((RecognizeCustomEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizeEntitiesResultCollection) {
validateBatchStatistics(((RecognizeEntitiesResultCollection) expectedResults).getStatistics(),
((RecognizeEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizeLinkedEntitiesResultCollection) {
validateBatchStatistics(((RecognizeLinkedEntitiesResultCollection) expectedResults).getStatistics(),
((RecognizeLinkedEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizePiiEntitiesResultCollection) {
validateBatchStatistics(((RecognizePiiEntitiesResultCollection) expectedResults).getStatistics(),
((RecognizePiiEntitiesResultCollection) actualResults).getStatistics());
}
// NOTE(review): AbstractiveSummaryResultCollection has no branch in either chain, so its
// statistics go unchecked here — confirm whether that is intentional.
} else {
// Statistics were not requested, so each known collection type must report none.
if (expectedResults instanceof AnalyzeHealthcareEntitiesResultCollection) {
assertNull(((AnalyzeHealthcareEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof AnalyzeSentimentResultCollection) {
assertNull(((AnalyzeSentimentResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof ClassifyDocumentResultCollection) {
assertNull(((ClassifyDocumentResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof DetectLanguageResultCollection) {
assertNull(((DetectLanguageResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof ExtractKeyPhrasesResultCollection) {
assertNull(((ExtractKeyPhrasesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof ExtractiveSummaryResultCollection) {
assertNull(((ExtractiveSummaryResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizeCustomEntitiesResultCollection) {
assertNull(((RecognizeCustomEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizeEntitiesResultCollection) {
assertNull(((RecognizeEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizeLinkedEntitiesResultCollection) {
assertNull(((RecognizeLinkedEntitiesResultCollection) actualResults).getStatistics());
} else if (expectedResults instanceof RecognizePiiEntitiesResultCollection) {
assertNull(((RecognizePiiEntitiesResultCollection) actualResults).getStatistics());
}
}
// Per-document checks: statistics (when requested), error parity, then caller-supplied asserts.
expected.forEach((key, expectedValue) -> {
T actualValue = actual.get(key);
assertNotNull(actualValue);
if (showStatistics) {
validateDocumentStatistics(expectedValue.getStatistics(), actualValue.getStatistics());
}
if (expectedValue.getError() == null) {
assertNull(actualValue.getError());
} else {
assertNotNull(actualValue.getError());
assertEquals(expectedValue.getError().getErrorCode(), actualValue.getError().getErrorCode());
validateErrorDocument(expectedValue.getError(), actualValue.getError());
}
additionalAssertions.accept(expectedValue, actualValue);
});
}
/**
 * Helper method to verify TextBatchStatistics.
 *
 * @param expectedStatistics the expected value for TextBatchStatistics.
 * @param actualStatistics the value returned by API.
 */
private static void validateBatchStatistics(TextDocumentBatchStatistics expectedStatistics,
    TextDocumentBatchStatistics actualStatistics) {
    // Compare every count the batch statistics expose; the checks are independent of each other.
    assertEquals(expectedStatistics.getTransactionCount(), actualStatistics.getTransactionCount());
    assertEquals(expectedStatistics.getValidDocumentCount(), actualStatistics.getValidDocumentCount());
    assertEquals(expectedStatistics.getInvalidDocumentCount(), actualStatistics.getInvalidDocumentCount());
    assertEquals(expectedStatistics.getDocumentCount(), actualStatistics.getDocumentCount());
}
/**
 * Helper method to verify TextDocumentStatistics.
 *
 * @param expected the expected value for TextDocumentStatistics.
 * @param actual the value returned by API.
 */
private static void validateDocumentStatistics(TextDocumentStatistics expected, TextDocumentStatistics actual) {
    // Per-document counters; order of the two checks is immaterial.
    assertEquals(expected.getTransactionCount(), actual.getTransactionCount());
    assertEquals(expected.getCharacterCount(), actual.getCharacterCount());
}
/**
 * Helper method to verify LinkedEntityMatches.
 * <p>
 * Sorts defensive copies by text before the pairwise comparison so the caller's lists are not
 * reordered as a side effect, and so read-only lists do not throw
 * {@code UnsupportedOperationException}.
 *
 * @param expectedLinkedEntityMatches the expected value for LinkedEntityMatches.
 * @param actualLinkedEntityMatches the value returned by API.
 */
private static void validateLinkedEntityMatches(List<LinkedEntityMatch> expectedLinkedEntityMatches,
    List<LinkedEntityMatch> actualLinkedEntityMatches) {
    assertEquals(expectedLinkedEntityMatches.size(), actualLinkedEntityMatches.size());
    // Sort copies, not the parameters themselves (the original implementation mutated its inputs).
    final List<LinkedEntityMatch> sortedExpected = expectedLinkedEntityMatches.stream()
        .sorted(Comparator.comparing(LinkedEntityMatch::getText))
        .collect(Collectors.toList());
    final List<LinkedEntityMatch> sortedActual = actualLinkedEntityMatches.stream()
        .sorted(Comparator.comparing(LinkedEntityMatch::getText))
        .collect(Collectors.toList());
    for (int i = 0; i < sortedExpected.size(); i++) {
        LinkedEntityMatch expectedLinkedEntity = sortedExpected.get(i);
        LinkedEntityMatch actualLinkedEntity = sortedActual.get(i);
        assertEquals(expectedLinkedEntity.getText(), actualLinkedEntity.getText());
        assertEquals(expectedLinkedEntity.getOffset(), actualLinkedEntity.getOffset());
        // Confidence scores vary between service runs, so only presence is asserted.
        assertNotNull(actualLinkedEntity.getConfidenceScore());
    }
}
/**
 * Helper method to verify the error document.
 *
 * @param expectedError the Error returned from the service.
 * @param actualError the Error returned from the API.
 */
static void validateErrorDocument(TextAnalyticsError expectedError, TextAnalyticsError actualError) {
// Error codes must match exactly; message wording is service-defined, so only assert presence.
assertEquals(expectedError.getErrorCode(), actualError.getErrorCode());
assertNotNull(actualError.getMessage());
}
/** Validates an extractive-summary result collection via the shared batch-result helper. */
static void validateExtractiveSummaryResultCollection(boolean showStatistics,
    ExtractiveSummaryResultCollection expected, ExtractiveSummaryResultCollection actual) {
    final BiConsumer<ExtractiveSummaryResult, ExtractiveSummaryResult> perDocumentAssertion =
        (expectedDocument, actualDocument) ->
            validateDocumentExtractiveSummaryResult(expectedDocument, actualDocument);
    validateTextAnalyticsResult(showStatistics, expected, actual, perDocumentAssertion);
}
/** Compares the extracted sentences of a single document's extractive-summary result. */
static void validateDocumentExtractiveSummaryResult(ExtractiveSummaryResult expect,
    ExtractiveSummaryResult actual) {
    final List<ExtractiveSummarySentence> expectedSentences =
        expect.getSentences().stream().collect(Collectors.toList());
    final List<ExtractiveSummarySentence> actualSentences =
        actual.getSentences().stream().collect(Collectors.toList());
    validateExtractiveSummarySentenceList(expectedSentences, actualSentences);
}
/** Asserts pairwise equality of the expected and actual extractive-summary action results. */
static void validateExtractiveSummaryActionResults(boolean showStatistics,
    List<ExtractiveSummaryActionResult> expected, List<ExtractiveSummaryActionResult> actual) {
    assertEquals(expected.size(), actual.size());
    for (int index = 0, count = actual.size(); index < count; index++) {
        validateExtractiveSummaryActionResult(showStatistics, expected.get(index), actual.get(index));
    }
}
/**
 * Validates a single extractive-summary action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateExtractiveSummaryActionResult(boolean showStatistics,
    ExtractiveSummaryActionResult expected,
    ExtractiveSummaryActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateExtractiveSummaryResultCollection(showStatistics,
            expected.getDocumentsResults(), actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/** Asserts the sentence lists have equal length and that their elements match pairwise. */
static void validateExtractiveSummarySentenceList(List<ExtractiveSummarySentence> expect,
    List<ExtractiveSummarySentence> actual) {
    final int sentenceCount = expect.size();
    assertEquals(sentenceCount, actual.size());
    for (int index = 0; index < sentenceCount; index++) {
        validateExtractiveSummarySentence(expect.get(index), actual.get(index));
    }
}
/**
 * Compares a single extractive-summary sentence. The rank score is run-dependent, so only its
 * presence is asserted.
 */
static void validateExtractiveSummarySentence(ExtractiveSummarySentence expect, ExtractiveSummarySentence actual) {
    assertNotNull(actual.getRankScore());
    assertEquals(expect.getLength(), actual.getLength());
    assertEquals(expect.getOffset(), actual.getOffset());
    assertEquals(expect.getText(), actual.getText());
}
/**
 * Returns {@code true} when the sentences appear in strictly ascending offset order
 * (equal offsets count as out of order).
 */
static boolean isAscendingOrderByOffSet(List<ExtractiveSummarySentence> extractiveSummarySentences) {
    int previousOffset = Integer.MIN_VALUE;
    for (ExtractiveSummarySentence sentence : extractiveSummarySentences) {
        final int offset = sentence.getOffset();
        if (offset <= previousOffset) {
            return false;
        }
        previousOffset = offset;
    }
    return true;
}
/**
 * Returns {@code true} when the sentences appear in non-increasing rank-score order
 * (ties are allowed).
 */
static boolean isDescendingOrderByRankScore(List<ExtractiveSummarySentence> extractiveSummarySentences) {
    double previousScore = Double.MAX_VALUE;
    for (ExtractiveSummarySentence sentence : extractiveSummarySentences) {
        final double score = sentence.getRankScore();
        if (score > previousScore) {
            return false;
        }
        previousScore = score;
    }
    return true;
}
/** Asserts pairwise equality of the expected and actual abstractive-summary action results. */
static void validateAbstractiveSummaryActionResults(boolean showStatistics,
    List<AbstractiveSummaryActionResult> expected, List<AbstractiveSummaryActionResult> actual) {
    assertEquals(expected.size(), actual.size());
    int index = 0;
    while (index < actual.size()) {
        validateAbstractiveSummaryActionResult(showStatistics, expected.get(index), actual.get(index));
        index++;
    }
}
/**
 * Validates a single abstractive-summary action result: on error, compares the error documents;
 * otherwise compares the document result collections.
 */
static void validateAbstractiveSummaryActionResult(boolean showStatistics,
    AbstractiveSummaryActionResult expected, AbstractiveSummaryActionResult actual) {
    assertEquals(expected.isError(), actual.isError());
    if (!actual.isError()) {
        validateAbstractiveSummaryResultCollection(showStatistics,
            expected.getDocumentsResults(), actual.getDocumentsResults());
        return;
    }
    final TextAnalyticsError expectedError = expected.getError();
    if (expectedError == null) {
        assertNull(actual.getError());
    } else {
        assertNotNull(actual.getError());
        validateErrorDocument(expectedError, actual.getError());
    }
}
/**
 * Validates an abstractive-summary result collection via the shared batch-result helper.
 * Only the actual document is inspected; the expected counterpart carries no comparable summary.
 */
static void validateAbstractiveSummaryResultCollection(boolean showStatistics,
    AbstractiveSummaryResultCollection expected, AbstractiveSummaryResultCollection actual) {
    validateTextAnalyticsResult(showStatistics, expected, actual,
        (ignoredExpected, actualItem) -> validateDocumentAbstractiveSummaryResult(actualItem));
}
/** Validates the summaries of a single abstractive-summary document result. */
static void validateDocumentAbstractiveSummaryResult(AbstractiveSummaryResult actual) {
    final List<AbstractiveSummary> summaries = actual.getSummaries().stream().collect(Collectors.toList());
    validateAbstractiveSummaries(summaries);
}
/** Asserts each abstractive summary has text, then validates its source contexts. */
static void validateAbstractiveSummaries(List<AbstractiveSummary> actual) {
    for (AbstractiveSummary abstractiveSummary : actual) {
        assertNotNull(abstractiveSummary.getText());
        validateSummaryContextList(
            abstractiveSummary.getContexts().stream().collect(Collectors.toList()));
    }
}
/** Asserts every summary context exposes an offset and a length. */
static void validateSummaryContextList(List<AbstractiveSummaryContext> actual) {
    for (AbstractiveSummaryContext summaryContext : actual) {
        assertNotNull(summaryContext.getOffset());
        assertNotNull(summaryContext.getLength());
    }
}
} | |
For ternary conditions, I think we should make the `if` and `else` cases be on separate lines to be consistent and easy to read. | public Duration calculateRetryDelay(Throwable lastException, int retryCount) {
if (retryOptions.getDelay() == Duration.ZERO || retryOptions.getMaxDelay() == Duration.ZERO
|| retryCount > retryOptions.getMaxRetries()) {
return null;
}
final Duration baseDelay;
if (lastException instanceof AmqpException && isRetriableException(lastException)) {
baseDelay = ((AmqpException) lastException).getErrorCondition() == SERVER_BUSY_ERROR
? retryOptions.getDelay().plus(SERVER_BUSY_WAIT_TIME) : retryOptions.getDelay();
} else if (lastException instanceof TimeoutException) {
baseDelay = retryOptions.getDelay();
} else {
baseDelay = null;
}
if (baseDelay == null) {
return null;
}
final Duration delay = calculateRetryDelay(retryCount, baseDelay, baseJitter, ThreadLocalRandom.current());
return delay.compareTo(retryOptions.getMaxDelay()) <= 0 ? delay : retryOptions.getMaxDelay();
} | ? retryOptions.getDelay().plus(SERVER_BUSY_WAIT_TIME) : retryOptions.getDelay(); | public Duration calculateRetryDelay(Throwable lastException, int retryCount) {
if (retryOptions.getDelay() == Duration.ZERO
|| retryOptions.getMaxDelay() == Duration.ZERO
|| retryCount > retryOptions.getMaxRetries()) {
return null;
}
final Duration baseDelay;
if (lastException instanceof AmqpException && isRetriableException(lastException)) {
baseDelay = ((AmqpException) lastException).getErrorCondition() == SERVER_BUSY_ERROR
? retryOptions.getDelay().plus(SERVER_BUSY_WAIT_TIME)
: retryOptions.getDelay();
} else if (lastException instanceof TimeoutException) {
baseDelay = retryOptions.getDelay();
} else {
baseDelay = null;
}
if (baseDelay == null) {
return null;
}
final Duration delay = calculateRetryDelay(retryCount, baseDelay, baseJitter, ThreadLocalRandom.current());
return delay.compareTo(retryOptions.getMaxDelay()) <= 0 ? delay : retryOptions.getMaxDelay();
} | class AmqpRetryPolicy {
static final long NANOS_PER_SECOND = 1000_000_000L;
private static final double JITTER_FACTOR = 0.08;
private final AmqpRetryOptions retryOptions;
private final Duration baseJitter;
/**
* Creates an instance with the given retry options. If {@link AmqpRetryOptions#getDelay()} or
* {@link AmqpRetryOptions#getMaxDelay()} is
* zero, requests failing with a retriable exception will not be retried.
*
* @param retryOptions The options to set on this retry policy.
* @throws NullPointerException if {@code retryOptions} is {@code null}.
*/
protected AmqpRetryPolicy(AmqpRetryOptions retryOptions) {
Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
this.retryOptions = retryOptions;
final double jitterInNanos = retryOptions.getDelay().getSeconds() * JITTER_FACTOR * NANOS_PER_SECOND;
baseJitter = Duration.ofNanos((long) jitterInNanos);
}
/**
* Gets the set of options used to configure this retry policy.
*
* @return The set of options used to configure this retry policy.
*/
public AmqpRetryOptions getRetryOptions() {
return retryOptions;
}
/**
* Gets the maximum number of retry attempts.
*
* @return The maximum number of retry attempts.
*/
public int getMaxRetries() {
return retryOptions.getMaxRetries();
}
/**
* Calculates the amount of time to delay before the next retry attempt.
*
* @param lastException The last exception that was observed for the operation to be retried.
* @param retryCount The number of attempts that have been made, including the initial attempt before any retries.
* @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation
* is no longer eligible to be retried.
*/
/**
* Calculates the amount of time to delay before the next retry attempt based on the {@code retryCount},
* {@code baseDelay}, and {@code baseJitter}.
*
* @param retryCount The number of attempts that have been made, including the initial attempt before any retries.
* @param baseDelay The base delay for a retry attempt.
* @param baseJitter The base jitter delay.
* @param random The random number generator. Can be utilised to calculate a random jitter value for the retry.
* @return The amount of time to delay before retrying the associated operation; or {@code null} if it cannot be
* retried.
*/
protected abstract Duration calculateRetryDelay(int retryCount, Duration baseDelay, Duration baseJitter,
ThreadLocalRandom random);
@Override
public int hashCode() {
return Objects.hash(retryOptions);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof AmqpRetryPolicy)) {
return false;
}
final AmqpRetryPolicy other = (AmqpRetryPolicy) obj;
return retryOptions.equals(other.retryOptions);
}
/**
* Check if the existing exception is a retriable exception.
*
* @param exception An exception that was observed for the operation to be retried.
* @return true if the exception is a retriable exception, otherwise false.
*/
private static boolean isRetriableException(Throwable exception) {
return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient();
}
} | class AmqpRetryPolicy {
static final long NANOS_PER_SECOND = 1000_000_000L;
private static final double JITTER_FACTOR = 0.08;
private final AmqpRetryOptions retryOptions;
private final Duration baseJitter;
/**
* Creates an instance with the given retry options. If {@link AmqpRetryOptions#getDelay()} or
* {@link AmqpRetryOptions#getMaxDelay()} is
* zero, requests failing with a retriable exception will not be retried.
*
* @param retryOptions The options to set on this retry policy.
* @throws NullPointerException if {@code retryOptions} is {@code null}.
*/
protected AmqpRetryPolicy(AmqpRetryOptions retryOptions) {
Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
this.retryOptions = retryOptions;
final double jitterInNanos = retryOptions.getDelay().getSeconds() * JITTER_FACTOR * NANOS_PER_SECOND;
baseJitter = Duration.ofNanos((long) jitterInNanos);
}
/**
* Gets the set of options used to configure this retry policy.
*
* @return The set of options used to configure this retry policy.
*/
public AmqpRetryOptions getRetryOptions() {
return retryOptions;
}
/**
* Gets the maximum number of retry attempts.
*
* @return The maximum number of retry attempts.
*/
public int getMaxRetries() {
return retryOptions.getMaxRetries();
}
/**
* Calculates the amount of time to delay before the next retry attempt.
*
* @param lastException The last exception that was observed for the operation to be retried.
* @param retryCount The number of attempts that have been made, including the initial attempt before any retries.
* @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation
* is no longer eligible to be retried.
*/
/**
* Calculates the amount of time to delay before the next retry attempt based on the {@code retryCount},
* {@code baseDelay}, and {@code baseJitter}.
*
* @param retryCount The number of attempts that have been made, including the initial attempt before any retries.
* @param baseDelay The base delay for a retry attempt.
* @param baseJitter The base jitter delay.
* @param random The random number generator. Can be utilised to calculate a random jitter value for the retry.
* @return The amount of time to delay before retrying the associated operation; or {@code null} if it cannot be
* retried.
*/
protected abstract Duration calculateRetryDelay(int retryCount, Duration baseDelay, Duration baseJitter,
ThreadLocalRandom random);
@Override
public int hashCode() {
return Objects.hash(retryOptions);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof AmqpRetryPolicy)) {
return false;
}
final AmqpRetryPolicy other = (AmqpRetryPolicy) obj;
return retryOptions.equals(other.retryOptions);
}
/**
* Check if the existing exception is a retriable exception.
*
* @param exception An exception that was observed for the operation to be retried.
* @return true if the exception is a retriable exception, otherwise false.
*/
private static boolean isRetriableException(Throwable exception) {
return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient();
}
} |
Should we make all chained calls (>=3) be on separate lines? | public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
} | processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId) | public void onError(Throwable throwable) {
Objects.requireNonNull(throwable, "'throwable' is required.");
if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) {
logger.warning("Retry is already pending. Ignoring transient error.", throwable);
return;
}
final int attemptsMade = retryAttempts.getAndIncrement();
final int attempts;
final Duration retryInterval;
if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient())
|| (throwable instanceof IllegalStateException)
|| (throwable instanceof RejectedExecutionException)) {
attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries());
final Throwable throwableToUse = throwable instanceof AmqpException
? throwable
: new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext);
retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts);
} else {
attempts = attemptsMade;
retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts);
}
if (retryInterval != null) {
if (isRetryPending.getAndSet(true)) {
retryAttempts.decrementAndGet();
return;
}
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.addKeyValue(INTERVAL_KEY, retryInterval.toMillis())
.log("Transient error occurred. Retrying.", throwable);
retrySubscription = Mono.delay(retryInterval).subscribe(i -> {
if (isDisposed()) {
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Not requesting from upstream. Processor is disposed.");
} else {
logger.atInfo().addKeyValue(TRY_COUNT_KEY, attemptsMade).log("Requesting from upstream.");
requestUpstream();
isRetryPending.set(false);
}
});
} else {
logger.atWarning()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Retry attempts exhausted or exception was not retriable.", throwable);
lastError = throwable;
isDisposed.set(true);
dispose();
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onError(throwable));
}
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM
= AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream");
private static final String TRY_COUNT_KEY = "tryCount";
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private final AmqpErrorContext errorContext;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param entityPath The entity path for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param logger The logger to use for this processor.
* @deprecated Use constructor overload that does not take {@link ClientLogger}
*/
@Deprecated
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
Map<String, Object> loggingContext = new HashMap<>(1);
loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));
this.logger = new ClientLogger(getClass(), loggingContext);
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param loggingContext Additional context to add to the logging scope.
*/
public AmqpChannelProcessor(String fullyQualifiedNamespace,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy,
Map<String, Object> loggingContext) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger
= new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("Setting next AMQP channel.");
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("Channel is now active.");
}
}, error -> {
setAndClearChannel();
onError(error);
}, () -> {
if (isDisposed()) {
logger.info("Channel is disposed.");
} else {
logger.info("Channel is closed. Requesting upstream.");
setAndClearChannel();
requestUpstream();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
/**
* When downstream or upstream encounters an error, calculates whether to request another item upstream.
*
* @param throwable Exception to analyse.
*/
@Override
@Override
public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
Operators.error(actual, logger.logExceptionAsError(
new IllegalStateException("Cannot subscribe. Processor is already terminated.")));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscriber.onAdd();
subscribers.add(subscriber);
if (!isRetryPending.get()) {
requestUpstream();
}
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
if (retrySubscription != null && !retrySubscription.isDisposed()) {
retrySubscription.dispose();
}
onComplete();
synchronized (lock) {
setAndClearChannel();
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("Connection exists, not requesting another.");
return;
} else if (isDisposed()) {
logger.verbose("Is already disposed.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("There is no upstream subscription.");
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("Connection not requested, yet. Requesting one.");
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
* Checks the current state of the channel for this channel and returns true if the channel is null or if this
* processor is disposed.
*
* @return true if the current channel in the processor is null or if the processor is disposed
*/
public boolean isChannelClosed() {
synchronized (lock) {
return currentChannel == null || isDisposed();
}
}
private void close(T channel) {
if (channel instanceof AsyncCloseable) {
((AsyncCloseable) channel).closeAsync().subscribe();
} else if (channel instanceof AutoCloseable) {
try {
((AutoCloseable) channel).close();
} catch (Exception error) {
logger.warning("Error occurred closing AutoCloseable channel.", error);
}
} else if (channel instanceof Disposable) {
try {
((Disposable) channel).dispose();
} catch (Exception error) {
logger.warning("Error occurred closing Disposable channel.", error);
}
}
}
/**
* Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor.
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor.
* The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives
* a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified,
* which removes itself from the tracking list, then propagates the notification to the wrapped subscriber.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Added subscriber.");
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM
= AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream");
private static final String TRY_COUNT_KEY = "tryCount";
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private final AmqpErrorContext errorContext;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param entityPath The entity path for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param logger The logger to use for this processor.
* @deprecated Use constructor overload that does not take {@link ClientLogger}
*/
@Deprecated
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
Map<String, Object> loggingContext = new HashMap<>(1);
loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));
this.logger = new ClientLogger(getClass(), loggingContext);
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param loggingContext Additional context to add to the logging scope.
*/
public AmqpChannelProcessor(String fullyQualifiedNamespace,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy,
Map<String, Object> loggingContext) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger
= new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("Setting next AMQP channel.");
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("Channel is now active.");
}
}, error -> {
setAndClearChannel();
onError(error);
}, () -> {
if (isDisposed()) {
logger.info("Channel is disposed.");
} else {
logger.info("Channel is closed. Requesting upstream.");
setAndClearChannel();
requestUpstream();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
/**
* When downstream or upstream encounters an error, calculates whether to request another item upstream.
*
* @param throwable Exception to analyse.
*/
@Override
@Override
public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
Operators.error(actual, logger.logExceptionAsError(
new IllegalStateException("Cannot subscribe. Processor is already terminated.")));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscriber.onAdd();
subscribers.add(subscriber);
if (!isRetryPending.get()) {
requestUpstream();
}
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
if (retrySubscription != null && !retrySubscription.isDisposed()) {
retrySubscription.dispose();
}
onComplete();
synchronized (lock) {
setAndClearChannel();
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("Connection exists, not requesting another.");
return;
} else if (isDisposed()) {
logger.verbose("Is already disposed.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("There is no upstream subscription.");
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("Connection not requested, yet. Requesting one.");
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
* Checks the current state of the channel for this channel and returns true if the channel is null or if this
* processor is disposed.
*
* @return true if the current channel in the processor is null or if the processor is disposed
*/
public boolean isChannelClosed() {
synchronized (lock) {
return currentChannel == null || isDisposed();
}
}
private void close(T channel) {
if (channel instanceof AsyncCloseable) {
((AsyncCloseable) channel).closeAsync().subscribe();
} else if (channel instanceof AutoCloseable) {
try {
((AutoCloseable) channel).close();
} catch (Exception error) {
logger.warning("Error occurred closing AutoCloseable channel.", error);
}
} else if (channel instanceof Disposable) {
try {
((Disposable) channel).dispose();
} catch (Exception error) {
logger.warning("Error occurred closing Disposable channel.", error);
}
}
}
/**
* Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor.
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor.
* The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives
* a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified,
* which removes itself from the tracking list, then propagates the notification to the wrapped subscriber.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Added subscriber.");
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo()
.addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
}
} |
Unfortunately, this only has a few configuration options Always newline ```java process .logger .atInfo() .addKeyValue(SUBSCRIBER_ID_KEY, subscriberId) ``` Split after first ```java process.logger .atInfo() .addKeyValue(SUBSCRIBER_ID_KEY, subscriberId) ``` Current behavior / split only required ```java processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId) ``` From looking at this before the first two make the code very long, which necessarily isn't a problem. But it's almost the inverse terseness issue where it's too spread out. | public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
} | processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId) | public void onError(Throwable throwable) {
Objects.requireNonNull(throwable, "'throwable' is required.");
if (isRetryPending.get() && retryPolicy.calculateRetryDelay(throwable, retryAttempts.get()) != null) {
logger.warning("Retry is already pending. Ignoring transient error.", throwable);
return;
}
final int attemptsMade = retryAttempts.getAndIncrement();
final int attempts;
final Duration retryInterval;
if (((throwable instanceof AmqpException) && ((AmqpException) throwable).isTransient())
|| (throwable instanceof IllegalStateException)
|| (throwable instanceof RejectedExecutionException)) {
attempts = Math.min(attemptsMade, retryPolicy.getMaxRetries());
final Throwable throwableToUse = throwable instanceof AmqpException
? throwable
: new AmqpException(true, "Non-AmqpException occurred upstream.", throwable, errorContext);
retryInterval = retryPolicy.calculateRetryDelay(throwableToUse, attempts);
} else {
attempts = attemptsMade;
retryInterval = retryPolicy.calculateRetryDelay(throwable, attempts);
}
if (retryInterval != null) {
if (isRetryPending.getAndSet(true)) {
retryAttempts.decrementAndGet();
return;
}
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.addKeyValue(INTERVAL_KEY, retryInterval.toMillis())
.log("Transient error occurred. Retrying.", throwable);
retrySubscription = Mono.delay(retryInterval).subscribe(i -> {
if (isDisposed()) {
logger.atInfo()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Not requesting from upstream. Processor is disposed.");
} else {
logger.atInfo().addKeyValue(TRY_COUNT_KEY, attemptsMade).log("Requesting from upstream.");
requestUpstream();
isRetryPending.set(false);
}
});
} else {
logger.atWarning()
.addKeyValue(TRY_COUNT_KEY, attemptsMade)
.log("Retry attempts exhausted or exception was not retriable.", throwable);
lastError = throwable;
isDisposed.set(true);
dispose();
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onError(throwable));
}
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM
= AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream");
private static final String TRY_COUNT_KEY = "tryCount";
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private final AmqpErrorContext errorContext;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param entityPath The entity path for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param logger The logger to use for this processor.
* @deprecated Use constructor overload that does not take {@link ClientLogger}
*/
@Deprecated
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
Map<String, Object> loggingContext = new HashMap<>(1);
loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));
this.logger = new ClientLogger(getClass(), loggingContext);
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param loggingContext Additional context to add to the logging scope.
*/
public AmqpChannelProcessor(String fullyQualifiedNamespace,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy,
Map<String, Object> loggingContext) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger
= new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
@Override
public void onSubscribe(Subscription subscription) {
if (Operators.setOnce(UPSTREAM, this, subscription)) {
isRequested.set(true);
subscription.request(1);
} else {
logger.warning("Processors can only be subscribed to once.");
}
}
@Override
public void onNext(T amqpChannel) {
logger.info("Setting next AMQP channel.");
Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");
final T oldChannel;
final Disposable oldSubscription;
synchronized (lock) {
oldChannel = currentChannel;
oldSubscription = connectionSubscription;
currentChannel = amqpChannel;
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));
connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(state -> {
if (state == AmqpEndpointState.ACTIVE) {
retryAttempts.set(0);
logger.info("Channel is now active.");
}
}, error -> {
setAndClearChannel();
onError(error);
}, () -> {
if (isDisposed()) {
logger.info("Channel is disposed.");
} else {
logger.info("Channel is closed. Requesting upstream.");
setAndClearChannel();
requestUpstream();
}
});
}
close(oldChannel);
if (oldSubscription != null) {
oldSubscription.dispose();
}
isRequested.set(false);
}
/**
* When downstream or upstream encounters an error, calculates whether to request another item upstream.
*
* @param throwable Exception to analyse.
*/
@Override
@Override
public void onComplete() {
logger.info("Upstream connection publisher was completed. Terminating processor.");
isDisposed.set(true);
synchronized (lock) {
final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
subscribers = new ConcurrentLinkedDeque<>();
currentSubscribers.forEach(subscriber -> subscriber.onComplete());
}
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
if (isDisposed()) {
if (lastError != null) {
actual.onSubscribe(Operators.emptySubscription());
actual.onError(lastError);
} else {
Operators.error(actual, logger.logExceptionAsError(
new IllegalStateException("Cannot subscribe. Processor is already terminated.")));
}
return;
}
final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
actual.onSubscribe(subscriber);
synchronized (lock) {
if (currentChannel != null) {
subscriber.complete(currentChannel);
return;
}
}
subscriber.onAdd();
subscribers.add(subscriber);
if (!isRetryPending.get()) {
requestUpstream();
}
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
if (retrySubscription != null && !retrySubscription.isDisposed()) {
retrySubscription.dispose();
}
onComplete();
synchronized (lock) {
setAndClearChannel();
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
private void requestUpstream() {
if (currentChannel != null) {
logger.verbose("Connection exists, not requesting another.");
return;
} else if (isDisposed()) {
logger.verbose("Is already disposed.");
return;
}
final Subscription subscription = UPSTREAM.get(this);
if (subscription == null) {
logger.warning("There is no upstream subscription.");
return;
}
if (!isRequested.getAndSet(true)) {
logger.info("Connection not requested, yet. Requesting one.");
subscription.request(1);
}
}
private void setAndClearChannel() {
T oldChannel;
synchronized (lock) {
oldChannel = currentChannel;
currentChannel = null;
}
close(oldChannel);
}
/**
* Checks the current state of the channel for this channel and returns true if the channel is null or if this
* processor is disposed.
*
* @return true if the current channel in the processor is null or if the processor is disposed
*/
public boolean isChannelClosed() {
synchronized (lock) {
return currentChannel == null || isDisposed();
}
}
private void close(T channel) {
if (channel instanceof AsyncCloseable) {
((AsyncCloseable) channel).closeAsync().subscribe();
} else if (channel instanceof AutoCloseable) {
try {
((AutoCloseable) channel).close();
} catch (Exception error) {
logger.warning("Error occurred closing AutoCloseable channel.", error);
}
} else if (channel instanceof Disposable) {
try {
((Disposable) channel).dispose();
} catch (Exception error) {
logger.warning("Error occurred closing Disposable channel.", error);
}
}
}
/**
* Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor.
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor.
* The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives
* a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified,
* which removes itself from the tracking list, then propagates the notification to the wrapped subscriber.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
private final AmqpChannelProcessor<T> processor;
private String subscriberId = null;
private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
super(actual);
this.processor = processor;
}
void onAdd() {
Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
if (subscriberIdObj != null) {
subscriberId = subscriberIdObj.toString();
} else {
subscriberId = StringUtil.getRandomString("un");
}
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Added subscriber.");
}
@Override
public void cancel() {
processor.subscribers.remove(this);
super.cancel();
processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Canceled subscriber");
}
@Override
public void onComplete() {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onComplete();
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("AMQP channel processor completed.");
}
}
@Override
public void onNext(T channel) {
if (!isCancelled()) {
processor.subscribers.remove(this);
super.complete(channel);
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Next AMQP channel received.");
}
}
@Override
public void onError(Throwable throwable) {
if (!isCancelled()) {
processor.subscribers.remove(this);
actual.onError(throwable);
processor.logger.atInfo().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
.log("Error in AMQP channel processor.");
} else {
Operators.onErrorDropped(throwable, currentContext());
}
}
}
} | class AmqpChannelProcessor<T> extends Mono<T> implements Processor<T, T>, CoreSubscriber<T>, Disposable {
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<AmqpChannelProcessor, Subscription> UPSTREAM
= AtomicReferenceFieldUpdater.newUpdater(AmqpChannelProcessor.class, Subscription.class, "upstream");
private static final String TRY_COUNT_KEY = "tryCount";
private final ClientLogger logger;
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean isRequested = new AtomicBoolean();
private final AtomicBoolean isRetryPending = new AtomicBoolean();
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object lock = new Object();
private final AmqpRetryPolicy retryPolicy;
private final Function<T, Flux<AmqpEndpointState>> endpointStatesFunction;
private final AmqpErrorContext errorContext;
private volatile Subscription upstream;
private volatile ConcurrentLinkedDeque<ChannelSubscriber<T>> subscribers = new ConcurrentLinkedDeque<>();
private volatile Throwable lastError;
private volatile T currentChannel;
private volatile Disposable connectionSubscription;
private volatile Disposable retrySubscription;
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param entityPath The entity path for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param logger The logger to use for this processor.
* @deprecated Use constructor overload that does not take {@link ClientLogger}
*/
@Deprecated
public AmqpChannelProcessor(String fullyQualifiedNamespace, String entityPath,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy, ClientLogger logger) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
Map<String, Object> loggingContext = new HashMap<>(1);
loggingContext.put(ENTITY_PATH_KEY, Objects.requireNonNull(entityPath, "'entityPath' cannot be null."));
this.logger = new ClientLogger(getClass(), loggingContext);
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
/**
* Creates an instance of {@link AmqpChannelProcessor}.
*
* @param fullyQualifiedNamespace The fully qualified namespace for the AMQP connection.
* @param endpointStatesFunction The function that returns the endpoint states for the AMQP connection.
* @param retryPolicy The retry policy for the AMQP connection.
* @param loggingContext Additional context to add to the logging scope.
*/
public AmqpChannelProcessor(String fullyQualifiedNamespace,
Function<T, Flux<AmqpEndpointState>> endpointStatesFunction, AmqpRetryPolicy retryPolicy,
Map<String, Object> loggingContext) {
this.endpointStatesFunction
= Objects.requireNonNull(endpointStatesFunction, "'endpointStates' cannot be null.");
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
this.logger
= new ClientLogger(getClass(), Objects.requireNonNull(loggingContext, "'loggingContext' cannot be null."));
this.errorContext = new AmqpErrorContext(fullyQualifiedNamespace);
}
@Override
public void onSubscribe(Subscription subscription) {
    // Only the very first upstream subscription is accepted; any later attempt is rejected.
    if (!Operators.setOnce(UPSTREAM, this, subscription)) {
        logger.warning("Processors can only be subscribed to once.");
        return;
    }

    isRequested.set(true);
    subscription.request(1);
}
@Override
public void onNext(T amqpChannel) {
    // Publishes the newly created channel to all waiting subscribers and starts monitoring its
    // endpoint states so a replacement can be requested when this channel terminates.
    logger.info("Setting next AMQP channel.");

    Objects.requireNonNull(amqpChannel, "'amqpChannel' cannot be null.");

    final T oldChannel;
    final Disposable oldSubscription;
    synchronized (lock) {
        oldChannel = currentChannel;
        oldSubscription = connectionSubscription;

        currentChannel = amqpChannel;

        // Notify waiting subscribers while holding the lock so a concurrent subscribe() either
        // sees currentChannel or lands in the subscriber list — never neither.
        final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
        currentSubscribers.forEach(subscription -> subscription.onNext(amqpChannel));

        connectionSubscription = endpointStatesFunction.apply(amqpChannel).subscribe(state -> {
            if (state == AmqpEndpointState.ACTIVE) {
                // A healthy channel resets the retry counter for future failures.
                retryAttempts.set(0);
                logger.info("Channel is now active.");
            }
        }, error -> {
            // Endpoint errored: drop the channel and let onError decide whether to retry.
            setAndClearChannel();
            onError(error);
        }, () -> {
            if (isDisposed()) {
                logger.info("Channel is disposed.");
            } else {
                // Channel closed normally; clear it and ask upstream for a replacement.
                logger.info("Channel is closed. Requesting upstream.");
                setAndClearChannel();
                requestUpstream();
            }
        });
    }

    // Close the previous channel and its subscription outside the lock to avoid blocking others.
    close(oldChannel);

    if (oldSubscription != null) {
        oldSubscription.dispose();
    }

    isRequested.set(false);
}
/**
 * Invoked when the upstream connection publisher completes. Marks this processor as disposed and
 * completes every downstream subscriber still waiting for a channel.
 *
 * <p>Fix: the previous code carried a duplicated {@code @Override} annotation (a compile error) and
 * an orphaned javadoc documenting a {@code throwable} parameter this no-arg method does not have.</p>
 */
@Override
public void onComplete() {
    logger.info("Upstream connection publisher was completed. Terminating processor.");

    isDisposed.set(true);
    synchronized (lock) {
        // Swap out the waiting list first so each subscriber is completed exactly once.
        final ConcurrentLinkedDeque<ChannelSubscriber<T>> currentSubscribers = subscribers;
        subscribers = new ConcurrentLinkedDeque<>();

        currentSubscribers.forEach(subscriber -> subscriber.onComplete());
    }
}
@Override
public void subscribe(CoreSubscriber<? super T> actual) {
    // A terminated processor cannot emit a channel; immediately signal the terminal state.
    if (isDisposed()) {
        if (lastError != null) {
            actual.onSubscribe(Operators.emptySubscription());
            actual.onError(lastError);
        } else {
            Operators.error(actual, logger.logExceptionAsError(
                new IllegalStateException("Cannot subscribe. Processor is already terminated.")));
        }

        return;
    }

    final ChannelSubscriber<T> subscriber = new ChannelSubscriber<T>(actual, this);
    actual.onSubscribe(subscriber);

    synchronized (lock) {
        // If a channel already exists, hand it out without joining the waiting list.
        if (currentChannel != null) {
            subscriber.complete(currentChannel);
            return;
        }
    }

    subscriber.onAdd();
    subscribers.add(subscriber);

    // Ask upstream for a channel unless a scheduled retry will do so shortly.
    if (!isRetryPending.get()) {
        requestUpstream();
    }
}
@Override
public void dispose() {
    // getAndSet makes disposal idempotent; only the first caller runs the shutdown path.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    if (retrySubscription != null && !retrySubscription.isDisposed()) {
        retrySubscription.dispose();
    }

    // Completes all waiting subscribers before tearing down the current channel.
    onComplete();

    synchronized (lock) {
        setAndClearChannel();
    }
}
// True once dispose() has run or the upstream publisher completed (see onComplete()).
@Override
public boolean isDisposed() {
    return isDisposed.get();
}
/**
 * Requests a new channel from the upstream subscription, unless a channel already exists, the
 * processor is disposed, or a request is already outstanding.
 */
private void requestUpstream() {
    if (currentChannel != null) {
        logger.verbose("Connection exists, not requesting another.");
        return;
    }

    if (isDisposed()) {
        logger.verbose("Is already disposed.");
        return;
    }

    final Subscription upstreamSubscription = UPSTREAM.get(this);
    if (upstreamSubscription == null) {
        logger.warning("There is no upstream subscription.");
        return;
    }

    // Only issue a new request if one is not already outstanding.
    final boolean alreadyRequested = isRequested.getAndSet(true);
    if (!alreadyRequested) {
        logger.info("Connection not requested, yet. Requesting one.");
        upstreamSubscription.request(1);
    }
}
/**
 * Atomically removes the current channel from this processor, then closes it outside the lock.
 */
private void setAndClearChannel() {
    final T channelToClose;
    synchronized (lock) {
        channelToClose = currentChannel;
        currentChannel = null;
    }

    close(channelToClose);
}
/**
 * Checks the current state of the channel for this processor and returns true if the channel is null or if this
 * processor is disposed.
 *
 * @return true if the current channel in the processor is null or if the processor is disposed
 */
public boolean isChannelClosed() {
    synchronized (lock) {
        return currentChannel == null || isDisposed();
    }
}
/**
 * Closes the given channel using whichever close contract it supports, preferring asynchronous
 * close, then {@link AutoCloseable}, then Reactor's {@link Disposable}. A channel implementing
 * none of these is silently ignored.
 */
private void close(T channel) {
    if (channel instanceof AsyncCloseable) {
        ((AsyncCloseable) channel).closeAsync().subscribe();
        return;
    }

    if (channel instanceof AutoCloseable) {
        try {
            ((AutoCloseable) channel).close();
        } catch (Exception closeError) {
            logger.warning("Error occurred closing AutoCloseable channel.", closeError);
        }
        return;
    }

    if (channel instanceof Disposable) {
        try {
            ((Disposable) channel).dispose();
        } catch (Exception closeError) {
            logger.warning("Error occurred closing Disposable channel.", closeError);
        }
    }
}
/**
* Represents the decorator-subscriber wrapping a downstream subscriber to AmqpChannelProcessor.
* These are the subscribers waiting to receive a channel that is yet to be available in the AmqpChannelProcessor.
* The AmqpChannelProcessor tracks a list of such waiting subscribers; once the processor receives
* a result (channel, error or disposal) from it's upstream, each decorated-subscriber will be notified,
* which removes itself from the tracking list, then propagates the notification to the wrapped subscriber.
*/
private static final class ChannelSubscriber<T> extends Operators.MonoSubscriber<T, T> {
    private final AmqpChannelProcessor<T> processor;
    // Correlation id used purely for logging; resolved from the subscriber's context in onAdd().
    private String subscriberId = null;

    private ChannelSubscriber(CoreSubscriber<? super T> actual, AmqpChannelProcessor<T> processor) {
        super(actual);
        this.processor = processor;
    }

    /**
     * Resolves the subscriber id from the Reactor context (falling back to a random id) and logs
     * that this subscriber joined the processor's waiting list.
     */
    void onAdd() {
        Object subscriberIdObj = actual.currentContext().getOrDefault(SUBSCRIBER_ID_KEY, null);
        if (subscriberIdObj != null) {
            subscriberId = subscriberIdObj.toString();
        } else {
            // No id supplied by the caller; generate one so log lines remain correlatable.
            subscriberId = StringUtil.getRandomString("un");
        }

        processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Added subscriber.");
    }

    @Override
    public void cancel() {
        // Stop tracking this subscriber so it is not notified after cancellation.
        processor.subscribers.remove(this);
        super.cancel();

        processor.logger.atVerbose().addKeyValue(SUBSCRIBER_ID_KEY, subscriberId).log("Canceled subscriber");
    }

    @Override
    public void onComplete() {
        if (!isCancelled()) {
            processor.subscribers.remove(this);
            actual.onComplete();

            processor.logger.atInfo()
                .addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
                .log("AMQP channel processor completed.");
        }
    }

    @Override
    public void onNext(T channel) {
        if (!isCancelled()) {
            processor.subscribers.remove(this);
            // complete() emits the channel downstream and marks this Mono subscriber as done.
            super.complete(channel);

            processor.logger.atInfo()
                .addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
                .log("Next AMQP channel received.");
        }
    }

    @Override
    public void onError(Throwable throwable) {
        if (!isCancelled()) {
            processor.subscribers.remove(this);
            actual.onError(throwable);

            processor.logger.atInfo()
                .addKeyValue(SUBSCRIBER_ID_KEY, subscriberId)
                .log("Error in AMQP channel processor.");
        } else {
            // Cancelled subscribers cannot receive errors; route to Reactor's dropped-error hook.
            Operators.onErrorDropped(throwable, currentContext());
        }
    }
}
} |
Performance idea, which we may be able to copy back to azure-core. Have a static read-only `HashSet` that is the default value here instead of a new HashSet and defer creating a new `HashSet` until `add*` methods are called. If those are never called, or only `set*` methods are called, then we've removed a copy constructor call. The `add*` methods may need an object reference equality check if someone calls ```java HttpClientOptions options = new HttpClientOptions(); options.setAllowedHeaderNames(options.getAllowedHeaderNames()) .addAllowedHeaderName("nonsense"); ``` Not a common pattern, or sensical one, but it would guard us against an attempt to mutated a read-only collection. | public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
} | } | public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new ArrayList<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private Set<String> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private boolean prettyPrintBody;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<String> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
"traceparent",
"Accept",
"Cache-Control",
"Connection",
"Content-Length",
"Content-Type",
"Date",
"ETag",
"Expires",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Unmodified-Since",
"Last-Modified",
"Pragma",
"Request-Id",
"Retry-After",
"Server",
"Transfer-Encoding",
"User-Agent",
"WWW-Authenticate"
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
 * Sets the level of detail to log on Http messages.
 *
 * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel#NONE} is used.
 *
 * @param logLevel The {@link HttpLogDetailLevel}.
 *
 * @return The updated HttpLogOptions object.
 */
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
    if (logLevel == null) {
        this.logLevel = HttpLogDetailLevel.NONE;
    } else {
        this.logLevel = logLevel;
    }

    return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public Set<String> getAllowedHeaderNames() {
return allowedHeaderNames;
}
/**
 * Sets the given allowed headers that should be logged.
 *
 * <p>
 * This method sets the provided header names to be the allowed header names which will be logged for all HTTP
 * requests and responses, overwriting any previously configured headers. Additionally, users can use
 * {@link HttpLogOptions#addAllowedHeaderName(String)} to add or
 * remove more header names to the existing set of allowed header names.
 * </p>
 *
 * @param allowedHeaderNames The list of allowed header names from the user.
 *
 * @return The updated HttpLogOptions object.
 */
public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) {
    // The caller's set is stored directly (no defensive copy); a null input resets to an empty set.
    this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames;
    return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return allowedQueryParamNames;
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
 * Sets the given allowed query param that should be logged.
 *
 * @param allowedQueryParamName The allowed query param name from the user.
 *
 * @return The updated HttpLogOptions object.
 *
 * @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
 */
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
    // Validate eagerly so the documented NullPointerException is actually thrown (HashSet.add
    // accepts null), matching the behavior of addAllowedHeaderName.
    Objects.requireNonNull(allowedQueryParamName);
    this.allowedQueryParamNames.add(allowedQueryParamName);
    // Removed a stray no-op statement (this.getClass().getName();) that had no effect.
    return this;
}
/**
* Gets flag to allow pretty printing of message bodies.
*
* @return true if pretty printing of message bodies is allowed.
*/
public boolean isPrettyPrintBody() {
return prettyPrintBody;
}
/**
* Sets flag to allow pretty printing of message bodies.
*
* @param prettyPrintBody If true, pretty prints message bodies when logging. If the detailLevel does not include
* body logging, this flag does nothing.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setPrettyPrintBody(boolean prettyPrintBody) {
this.prettyPrintBody = prettyPrintBody;
return this;
}
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all the request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODY_AND_HEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY_AND_HEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODY_AND_HEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODY_AND_HEADERS;
}
}
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private List<HeaderName> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<HeaderName> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
HeaderName.TRACEPARENT,
HeaderName.ACCEPT,
HeaderName.CACHE_CONTROL,
HeaderName.CONNECTION,
HeaderName.CONTENT_LENGTH,
HeaderName.CONTENT_TYPE,
HeaderName.DATE,
HeaderName.ETAG,
HeaderName.EXPIRES,
HeaderName.IF_MATCH,
HeaderName.IF_MODIFIED_SINCE,
HeaderName.IF_NONE_MATCH,
HeaderName.IF_UNMODIFIED_SINCE,
HeaderName.LAST_MODIFIED,
HeaderName.PRAGMA,
HeaderName.CLIENT_REQUEST_ID,
HeaderName.RETRY_AFTER,
HeaderName.SERVER,
HeaderName.TRANSFER_ENCODING,
HeaderName.USER_AGENT,
HeaderName.WWW_AUTHENTICATE
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public List<HeaderName> getAllowedHeaderNames() {
return Collections.unmodifiableList(allowedHeaderNames);
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final List<HeaderName> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new ArrayList<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final HeaderName allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return Collections.unmodifiableSet(allowedQueryParamNames);
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
return this;
}
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all allowed request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODYANDHEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODYANDHEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODYANDHEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODYANDHEADERS;
}
}
} |
I may recommend making this a VERBOSE log as this isn't something a user could do anything about and would just stuff their log system. | private static long getContentLength(ClientLogger logger, Headers headers) {
long contentLength = 0;
String contentLengthString = headers.getValue(HeaderName.CONTENT_LENGTH);
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException | NullPointerException e) {
logger.log(ClientLogger.LogLevel.INFORMATIONAL,
() -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e);
}
return contentLength;
} | () -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e); | private static long getContentLength(ClientLogger logger, Headers headers) {
long contentLength = 0;
String contentLengthString = headers.getValue(HeaderName.CONTENT_LENGTH);
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException | NullPointerException e) {
logger.atVerbose().log(() -> "Could not parse the HTTP header content-length: '"
+ contentLengthString + "'.", e);
}
return contentLength;
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
private void logHeaders(ClientLogger logger, HttpResponse response, ClientLogger.LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(ClientLogger.LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
}
}
private void logUrl(HttpResponse response, Duration duration, ClientLogger.LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(),
allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, duration.toMillis());
}
}
private void logContentLength(HttpResponse response, ClientLogger.LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponse(ClientLogger logger, HttpResponse response, Duration duration) {
    // Skip all logging work when the logger would discard the message anyway.
    final ClientLogger.LogLevel logLevel = getLogLevel(response);
    if (!logger.canLogAtLevel(logLevel)) {
        return response;
    }

    ClientLogger.LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);

    logContentLength(response, logBuilder);
    logUrl(response, duration, logBuilder);
    logHeaders(logger, response, logBuilder);

    if (httpLogDetailLevel.shouldLogBody()) {
        String contentTypeHeader = response.getHeaderValue(HeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, response.getHeaders());

        if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
            // Returns a wrapper instead of logging here; presumably the wrapper emits the deferred
            // log once the body is consumed — confirm against LoggingHttpResponse.
            return new LoggingHttpResponse(response, logBuilder, logger,
                (int) contentLength, contentTypeHeader);
        }
    }

    logBuilder.log(RESPONSE_LOG_MESSAGE);
    return response;
}
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
private void logHeaders(ClientLogger logger, HttpResponse response, ClientLogger.LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(ClientLogger.LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
}
}
private void logUrl(HttpResponse response, Duration duration, ClientLogger.LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(),
allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, duration.toMillis());
}
}
private void logContentLength(HttpResponse response, ClientLogger.LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponse(ClientLogger logger, HttpResponse response, Duration duration) {
final ClientLogger.LogLevel logLevel = getLogLevel(response);
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
ClientLogger.LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(response, duration, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return new LoggingHttpResponse(response, logBuilder
);
}
}
logBuilder.log(() -> RESPONSE_LOG_MESSAGE);
return response;
}
} |
But how then will the user be able to see the default allowed headers list before doing the `add`? It would be empty in that case since we deferred creating the HashSet until add* methods are called. | public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
} | } | public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new ArrayList<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private Set<String> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private boolean prettyPrintBody;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<String> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
"traceparent",
"Accept",
"Cache-Control",
"Connection",
"Content-Length",
"Content-Type",
"Date",
"ETag",
"Expires",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Unmodified-Since",
"Last-Modified",
"Pragma",
"Request-Id",
"Retry-After",
"Server",
"Transfer-Encoding",
"User-Agent",
"WWW-Authenticate"
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public Set<String> getAllowedHeaderNames() {
return allowedHeaderNames;
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return allowedQueryParamNames;
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
    // Replaces the entire allow list; null resets it to an empty, mutable set so that
    // addAllowedQueryParamName keeps working afterwards.
    if (allowedQueryParamNames == null) {
        this.allowedQueryParamNames = new HashSet<>();
    } else {
        this.allowedQueryParamNames = allowedQueryParamNames;
    }
    return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
    // Enforce the documented @throws NullPointerException contract, mirroring
    // addAllowedHeaderName (a HashSet would otherwise silently accept null).
    Objects.requireNonNull(allowedQueryParamName);
    this.allowedQueryParamNames.add(allowedQueryParamName);
    // Removed dead statement 'this.getClass().getName();' — it computed a value
    // and discarded it, with no side effects.
    return this;
}
/**
* Gets flag to allow pretty printing of message bodies.
*
* @return true if pretty printing of message bodies is allowed.
*/
public boolean isPrettyPrintBody() {
return prettyPrintBody;
}
/**
* Sets flag to allow pretty printing of message bodies.
*
* @param prettyPrintBody If true, pretty prints message bodies when logging. If the detailLevel does not include
* body logging, this flag does nothing.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setPrettyPrintBody(boolean prettyPrintBody) {
this.prettyPrintBody = prettyPrintBody;
return this;
}
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all the request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODY_AND_HEADERS;
// Accepted (case-insensitive) values for the HTTP log detail level configuration property.
// Both "body_and_headers" and "bodyandheaders" map to BODY_AND_HEADERS.
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
// Resolved once at class-initialization time from the process-wide configuration.
// NOTE(review): getGlobalConfiguration() is presumably a static import of
// Configuration.getGlobalConfiguration() — confirm against the file's imports.
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
// Maps the configured string to a level; any missing or unrecognized value falls back to NONE
// ("none" is the default supplied to the configuration read).
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY_AND_HEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
// Every level except NONE logs at least the URL/method line.
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODY_AND_HEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODY_AND_HEADERS;
}
}
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private List<HeaderName> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<HeaderName> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
HeaderName.TRACEPARENT,
HeaderName.ACCEPT,
HeaderName.CACHE_CONTROL,
HeaderName.CONNECTION,
HeaderName.CONTENT_LENGTH,
HeaderName.CONTENT_TYPE,
HeaderName.DATE,
HeaderName.ETAG,
HeaderName.EXPIRES,
HeaderName.IF_MATCH,
HeaderName.IF_MODIFIED_SINCE,
HeaderName.IF_NONE_MATCH,
HeaderName.IF_UNMODIFIED_SINCE,
HeaderName.LAST_MODIFIED,
HeaderName.PRAGMA,
HeaderName.CLIENT_REQUEST_ID,
HeaderName.RETRY_AFTER,
HeaderName.SERVER,
HeaderName.TRANSFER_ENCODING,
HeaderName.USER_AGENT,
HeaderName.WWW_AUTHENTICATE
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public List<HeaderName> getAllowedHeaderNames() {
return Collections.unmodifiableList(allowedHeaderNames);
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final List<HeaderName> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new ArrayList<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final HeaderName allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return Collections.unmodifiableSet(allowedQueryParamNames);
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
return this;
}
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all allowed request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODYANDHEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODYANDHEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODYANDHEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODYANDHEADERS;
}
}
} |
We'd return the shared set in that case. And since we would change the get to read-only this would be read-only as well which would be safe to return. | public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
} | } | public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new ArrayList<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private Set<String> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private boolean prettyPrintBody;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<String> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
"traceparent",
"Accept",
"Cache-Control",
"Connection",
"Content-Length",
"Content-Type",
"Date",
"ETag",
"Expires",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Unmodified-Since",
"Last-Modified",
"Pragma",
"Request-Id",
"Retry-After",
"Server",
"Transfer-Encoding",
"User-Agent",
"WWW-Authenticate"
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public Set<String> getAllowedHeaderNames() {
return allowedHeaderNames;
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return allowedQueryParamNames;
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
    // Enforce the documented @throws NullPointerException contract, mirroring
    // addAllowedHeaderName (a HashSet would otherwise silently accept null).
    Objects.requireNonNull(allowedQueryParamName);
    this.allowedQueryParamNames.add(allowedQueryParamName);
    // Removed dead statement 'this.getClass().getName();' — it computed a value
    // and discarded it, with no side effects.
    return this;
}
/**
* Gets flag to allow pretty printing of message bodies.
*
* @return true if pretty printing of message bodies is allowed.
*/
public boolean isPrettyPrintBody() {
return prettyPrintBody;
}
/**
* Sets flag to allow pretty printing of message bodies.
*
* @param prettyPrintBody If true, pretty prints message bodies when logging. If the detailLevel does not include
* body logging, this flag does nothing.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setPrettyPrintBody(boolean prettyPrintBody) {
this.prettyPrintBody = prettyPrintBody;
return this;
}
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all the request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODY_AND_HEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY_AND_HEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODY_AND_HEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODY_AND_HEADERS;
}
}
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private List<HeaderName> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<HeaderName> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
HeaderName.TRACEPARENT,
HeaderName.ACCEPT,
HeaderName.CACHE_CONTROL,
HeaderName.CONNECTION,
HeaderName.CONTENT_LENGTH,
HeaderName.CONTENT_TYPE,
HeaderName.DATE,
HeaderName.ETAG,
HeaderName.EXPIRES,
HeaderName.IF_MATCH,
HeaderName.IF_MODIFIED_SINCE,
HeaderName.IF_NONE_MATCH,
HeaderName.IF_UNMODIFIED_SINCE,
HeaderName.LAST_MODIFIED,
HeaderName.PRAGMA,
HeaderName.CLIENT_REQUEST_ID,
HeaderName.RETRY_AFTER,
HeaderName.SERVER,
HeaderName.TRANSFER_ENCODING,
HeaderName.USER_AGENT,
HeaderName.WWW_AUTHENTICATE
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public List<HeaderName> getAllowedHeaderNames() {
return Collections.unmodifiableList(allowedHeaderNames);
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final List<HeaderName> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new ArrayList<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final HeaderName allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return Collections.unmodifiableSet(allowedQueryParamNames);
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
return this;
}
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all allowed request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODYANDHEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODYANDHEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODYANDHEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODYANDHEADERS;
}
}
} |
This doesn't need to be a string format as the supplied string is a constant, should initialize this once and use that every time. | private static HeaderName validateLocationHeader(String locationHeader) {
if (CoreUtils.isNullOrEmpty(locationHeader)) {
LOGGER.atError().log(() ->
String.format("'locationHeader' provided as null will be defaulted to {%s}", HeaderName.LOCATION));
return HeaderName.LOCATION;
} else {
return HeaderName.fromString(locationHeader);
}
} | String.format("'locationHeader' provided as null will be defaulted to {%s}", HeaderName.LOCATION)); | private static HeaderName validateLocationHeader(String locationHeader) {
if (CoreUtils.isNullOrEmpty(locationHeader)) {
LOGGER.atError().log(() ->
"'locationHeader' provided as null will be defaulted to {" + HeaderName.LOCATION + "}");
return HeaderName.LOCATION;
} else {
return HeaderName.fromString(locationHeader);
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class);
// Default cap on redirect attempts when the caller does not supply one.
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
// HTTP 308 Permanent Redirect.
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
// HTTP 307 Temporary Redirect.
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
// GET and HEAD are the only methods redirected by default (see validateAllowedMethods).
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD);
// Structured-logging key under which the set of already-visited redirect URLs is recorded.
private static final String REDIRECT_URLS_KEY = "redirectUrls";
// Maximum redirect attempts for this strategy instance; validated >= 0 in the constructor.
private final int maxAttempts;
// Header carrying the redirect target; falls back to "Location" when not supplied.
private final HeaderName locationHeader;
// HTTP methods this instance is willing to redirect.
private final Set<HttpMethod> allowedRedirectHttpMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
*
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
// Normalize inputs before delegating: a null/empty header name falls back to "Location"
// and a null/empty method set falls back to GET/HEAD (both fallbacks log an error).
this(maxAttempts, validateLocationHeader(locationHeader), validateAllowedMethods(allowedMethods));
}
// Canonical constructor; callers are expected to pass already-validated
// locationHeader and allowedMethods (see the public constructor).
private DefaultRedirectStrategy(int maxAttempts, HeaderName locationHeader, Set<HttpMethod> allowedMethods) {
// Negative attempt counts are a programming error; logged and rethrown.
if (maxAttempts < 0) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
this.locationHeader = locationHeader;
this.allowedRedirectHttpMethods = allowedMethods;
}
/**
 * Validates the caller-supplied set of redirectable HTTP methods, falling back to
 * the GET/HEAD default (with an error log) when the set is null or empty.
 *
 * @param allowedMethods The methods allowed to be redirected, possibly null/empty.
 * @return The default allow list, or a defensive copy of the supplied set.
 */
private static Set<HttpMethod> validateAllowedMethods(Set<HttpMethod> allowedMethods) {
    if (CoreUtils.isNullOrEmpty(allowedMethods)) {
        // The message is effectively constant, so plain concatenation replaces the
        // previous String.format call (matches the validateLocationHeader style).
        LOGGER.atError().log(() ->
            "'allowedMethods' provided as null will be defaulted to {" + DEFAULT_REDIRECT_ALLOWED_METHODS + "}");
        return DEFAULT_REDIRECT_ALLOWED_METHODS;
    } else {
        // Defensive copy so later mutation of the caller's set cannot affect this strategy.
        return EnumSet.copyOf(allowedMethods);
    }
}
@Override
public boolean shouldAttemptRedirect(HttpRequest httpRequest, HttpResponse httpResponse, int tryCount,
    Set<String> attemptedRedirectUrls) {
    // Guard clauses replace the original nested conditionals; evaluation order is
    // preserved because isValidRedirectCount and isAllowedRedirectMethod log on failure.
    if (!isValidRedirectStatusCode(httpResponse.getStatusCode())
        || !isValidRedirectCount(tryCount)
        || !isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
        return false;
    }
    String redirectUrl = httpResponse.getHeaderValue(locationHeader);
    // No redirect header, or a URL already visited (loop protection), means no redirect.
    if (redirectUrl == null || alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) {
        return false;
    }
    LOGGER.atVerbose()
        .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
        .addKeyValue(REDIRECT_URLS_KEY, attemptedRedirectUrls::toString)
        .log("Redirecting.");
    // Record the target so a later response pointing back here is rejected.
    attemptedRedirectUrls.add(redirectUrl);
    return true;
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
return httpResponse.getRequest().setUrl(httpResponse.getHeaderValue(locationHeader));
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
*
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect,
* {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
LOGGER.atError()
.addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl)
.log("Request was redirected more than once to the same URL.");
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
*
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
LOGGER.atError()
.addKeyValue("maxAttempts", getMaxAttempts())
.log("Redirect attempts have been exhausted.");
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
*
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (allowedRedirectHttpMethods.contains(httpMethod)) {
return true;
} else {
LOGGER.atError()
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod)
.log("Request was redirected from an invalid redirect allowed method.");
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
*
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
} | class DefaultRedirectStrategy implements RedirectStrategy {
private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class);
private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3;
private static final int PERMANENT_REDIRECT_STATUS_CODE = 308;
private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307;
private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD);
private static final String REDIRECT_URLS_KEY = "redirectUrls";
private final int maxAttempts;
private final HeaderName locationHeader;
private final Set<HttpMethod> allowedRedirectHttpMethods;
/**
* Creates an instance of {@link DefaultRedirectStrategy}.
*
* @param maxAttempts The max number of redirect attempts that can be made.
* @param locationHeader The header name containing the redirect URL.
* @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected.
*
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0.
*/
public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) {
this(maxAttempts, validateLocationHeader(locationHeader), validateAllowedMethods(allowedMethods));
}
private DefaultRedirectStrategy(int maxAttempts, HeaderName locationHeader, Set<HttpMethod> allowedMethods) {
if (maxAttempts < 0) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("Max attempts cannot be less than 0."));
}
this.maxAttempts = maxAttempts;
this.locationHeader = locationHeader;
this.allowedRedirectHttpMethods = allowedMethods;
}
private static Set<HttpMethod> validateAllowedMethods(Set<HttpMethod> allowedMethods) {
if (CoreUtils.isNullOrEmpty(allowedMethods)) {
LOGGER.atError().log(() ->
String.format("'allowedMethods' provided as null will be defaulted to {%s}",
DEFAULT_REDIRECT_ALLOWED_METHODS));
return DEFAULT_REDIRECT_ALLOWED_METHODS;
} else {
return EnumSet.copyOf(allowedMethods);
}
}
@Override
public boolean shouldAttemptRedirect(HttpRequest httpRequest, HttpResponse httpResponse, int tryCount,
Set<String> attemptedRedirectUrls) {
if (isValidRedirectStatusCode(httpResponse.getStatusCode())
&& isValidRedirectCount(tryCount)
&& isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) {
String redirectUrl = httpResponse.getHeaderValue(locationHeader);
if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) {
LOGGER.atVerbose()
.addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
.addKeyValue(REDIRECT_URLS_KEY, attemptedRedirectUrls::toString)
.log(() -> "Redirecting.");
attemptedRedirectUrls.add(redirectUrl);
return true;
} else {
return false;
}
} else {
return false;
}
}
@Override
public HttpRequest createRedirectRequest(HttpResponse httpResponse) {
return httpResponse.getRequest().setUrl(httpResponse.getHeaderValue(locationHeader));
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
/**
* Check if the redirect url provided in the response headers is already attempted.
*
* @param redirectUrl the redirect url provided in the response header.
* @param attemptedRedirectUrls the set containing a list of attempted redirect locations.
*
* @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect,
* {@code false} otherwise.
*/
private boolean alreadyAttemptedRedirectUrl(String redirectUrl,
Set<String> attemptedRedirectUrls) {
if (attemptedRedirectUrls.contains(redirectUrl)) {
LOGGER.atError()
.addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl)
.log(() -> "Request was redirected more than once to the same URL.");
return true;
}
return false;
}
/**
* Check if the attempt count of the redirect is less than the {@code maxAttempts}
*
* @param tryCount the try count for the HTTP request associated to the HTTP response.
*
* @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise.
*/
private boolean isValidRedirectCount(int tryCount) {
if (tryCount >= getMaxAttempts()) {
LOGGER.atError()
.addKeyValue("maxAttempts", getMaxAttempts())
.log(() -> "Redirect attempts have been exhausted.");
return false;
}
return true;
}
/**
* Check if the request http method is a valid redirect method.
*
* @param httpMethod the http method of the request.
*
* @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isAllowedRedirectMethod(HttpMethod httpMethod) {
if (allowedRedirectHttpMethods.contains(httpMethod)) {
return true;
} else {
LOGGER.atError()
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod)
.log(() -> "Request was redirected from an invalid redirect allowed method.");
return false;
}
}
/**
* Checks if the incoming request status code is a valid redirect status code.
*
* @param statusCode the status code of the incoming request.
*
* @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise.
*/
private boolean isValidRedirectStatusCode(int statusCode) {
return statusCode == HttpURLConnection.HTTP_MOVED_TEMP
|| statusCode == HttpURLConnection.HTTP_MOVED_PERM
|| statusCode == PERMANENT_REDIRECT_STATUS_CODE
|| statusCode == TEMPORARY_REDIRECT_STATUS_CODE;
}
} |
Apparently its in main too https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLogOptions.java#L179 :octocat: | public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
this.getClass().getName();
return this;
} | this.getClass().getName(); | public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
return this;
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private List<HeaderName> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<HeaderName> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
HeaderName.TRACEPARENT,
HeaderName.ACCEPT,
HeaderName.CACHE_CONTROL,
HeaderName.CONNECTION,
HeaderName.CONTENT_LENGTH,
HeaderName.CONTENT_TYPE,
HeaderName.DATE,
HeaderName.ETAG,
HeaderName.EXPIRES,
HeaderName.IF_MATCH,
HeaderName.IF_MODIFIED_SINCE,
HeaderName.IF_NONE_MATCH,
HeaderName.IF_UNMODIFIED_SINCE,
HeaderName.LAST_MODIFIED,
HeaderName.PRAGMA,
HeaderName.CLIENT_REQUEST_ID,
HeaderName.RETRY_AFTER,
HeaderName.SERVER,
HeaderName.TRANSFER_ENCODING,
HeaderName.USER_AGENT,
HeaderName.WWW_AUTHENTICATE
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new ArrayList<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
}
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public List<HeaderName> getAllowedHeaderNames() {
return Collections.unmodifiableList(allowedHeaderNames);
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final List<HeaderName> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new ArrayList<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(HeaderName.fromString(allowedHeaderName));
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return Collections.unmodifiableSet(allowedQueryParamNames);
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all the request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODYANDHEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODYANDHEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODYANDHEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODYANDHEADERS;
}
}
} | class HttpLogOptions {
private HttpLogDetailLevel logLevel;
private List<HeaderName> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private HttpRequestLogger requestLogger;
private HttpResponseLogger responseLogger;
private static final List<HeaderName> DEFAULT_HEADERS_ALLOWLIST = Arrays.asList(
HeaderName.TRACEPARENT,
HeaderName.ACCEPT,
HeaderName.CACHE_CONTROL,
HeaderName.CONNECTION,
HeaderName.CONTENT_LENGTH,
HeaderName.CONTENT_TYPE,
HeaderName.DATE,
HeaderName.ETAG,
HeaderName.EXPIRES,
HeaderName.IF_MATCH,
HeaderName.IF_MODIFIED_SINCE,
HeaderName.IF_NONE_MATCH,
HeaderName.IF_UNMODIFIED_SINCE,
HeaderName.LAST_MODIFIED,
HeaderName.PRAGMA,
HeaderName.CLIENT_REQUEST_ID,
HeaderName.RETRY_AFTER,
HeaderName.SERVER,
HeaderName.TRANSFER_ENCODING,
HeaderName.USER_AGENT,
HeaderName.WWW_AUTHENTICATE
);
private static final List<String> DEFAULT_QUERY_PARAMS_ALLOWLIST = Collections.singletonList(
"api-version"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
public HttpLogOptions() {
logLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
allowedHeaderNames = new ArrayList<>(DEFAULT_HEADERS_ALLOWLIST);
allowedQueryParamNames = new HashSet<>(DEFAULT_QUERY_PARAMS_ALLOWLIST);
}
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the allowed headers that should be logged.
*
* @return The list of allowed headers.
*/
public List<HeaderName> getAllowedHeaderNames() {
return Collections.unmodifiableList(allowedHeaderNames);
}
/**
* Sets the given allowed headers that should be logged.
*
* <p>
* This method sets the provided header names to be the allowed header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers. Additionally, users can use
* {@link HttpLogOptions
* remove more headers names to the existing set of allowed header names.
* </p>
*
* @param allowedHeaderNames The list of allowed header names from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final List<HeaderName> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new ArrayList<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given allowed header to the default header set that should be logged.
*
* @param allowedHeaderName The allowed header name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final HeaderName allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the allowed query parameters.
*
* @return The list of allowed query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return Collections.unmodifiableSet(allowedQueryParamNames);
}
/**
* Sets the given allowed query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of allowed query params from the user.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given allowed query param that should be logged.
*
* @param allowedQueryParamName The allowed query param name from the user.
*
* @return The updated HttpLogOptions object.
*
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
/**
* Gets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @return The {@link HttpRequestLogger} that will be used to log HTTP requests.
*/
public HttpRequestLogger getRequestLogger() {
return requestLogger;
}
/**
* Sets the {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* <p>A default {@link HttpRequestLogger} will be used if one isn't supplied.
*
* @param requestLogger The {@link HttpRequestLogger} that will be used to log HTTP requests.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setRequestLogger(HttpRequestLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
/**
* Gets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @return The {@link HttpResponseLogger} that will be used to log HTTP responses.
*/
public HttpResponseLogger getResponseLogger() {
return responseLogger;
}
/**
* Sets the {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* <p>A default {@link HttpResponseLogger} will be used if one isn't supplied.
*
* @param responseLogger The {@link HttpResponseLogger} that will be used to log HTTP responses.
*
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setResponseLogger(HttpResponseLogger responseLogger) {
this.responseLogger = responseLogger;
return this;
}
/**
* The level of detail to log on HTTP messages.
*/
public enum HttpLogDetailLevel {
/**
* Logging is turned off.
*/
NONE,
/**
* Logs only URLs, HTTP methods, and time to finish the request.
*/
BASIC,
/**
* Logs everything in BASIC, plus all allowed request and response headers.
*/
HEADERS,
/**
* Logs everything in BASIC, plus all the request and response body. Note that only payloads in plain text or
* plain text encoded in GZIP will be logged.
*/
BODY,
/**
* Logs everything in HEADERS and BODY.
*/
BODYANDHEADERS;
static final String BASIC_VALUE = "basic";
static final String HEADERS_VALUE = "headers";
static final String BODY_VALUE = "body";
static final String BODY_AND_HEADERS_VALUE = "body_and_headers";
static final String BODYANDHEADERS_VALUE = "bodyandheaders";
static final HttpLogDetailLevel ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL = fromConfiguration(getGlobalConfiguration());
static HttpLogDetailLevel fromConfiguration(Configuration configuration) {
String detailLevel = configuration.get(Configuration.PROPERTY_HTTP_LOG_DETAIL_LEVEL, "none");
HttpLogDetailLevel logDetailLevel;
if (BASIC_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BASIC;
} else if (HEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = HEADERS;
} else if (BODY_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODY;
} else if (BODY_AND_HEADERS_VALUE.equalsIgnoreCase(detailLevel)
|| BODYANDHEADERS_VALUE.equalsIgnoreCase(detailLevel)) {
logDetailLevel = BODYANDHEADERS;
} else {
logDetailLevel = NONE;
}
return logDetailLevel;
}
/**
* Whether a URL should be logged.
*
* @return Whether a URL should be logged.
*/
public boolean shouldLogUrl() {
return this != NONE;
}
/**
* Whether headers should be logged.
*
* @return Whether headers should be logged.
*/
public boolean shouldLogHeaders() {
return this == HEADERS || this == BODYANDHEADERS;
}
/**
* Whether a body should be logged.
*
* @return Whether a body should be logged.
*/
public boolean shouldLogBody() {
return this == BODY || this == BODYANDHEADERS;
}
}
} |
Should we allow shutting the executor down? If someone is explicitly calling this method, then the underlying executor service can be shutdown and any further operation on this will result in an exception. | public void shutdown() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
} | throw LOGGER.logThrowableAsError( | public void shutdown() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
} | class SharedExecutorService implements ExecutorService {
private static final ClientLogger LOGGER = new ClientLogger(SharedExecutorService.class);
private static final AtomicReference<SharedExecutorService> INSTANCE = new AtomicReference<>();
private static final AtomicLong AZURE_SDK_THREAD_COUNTER = new AtomicLong();
private static final String AZURE_SDK_THREAD_NAME = "azure-sdk-global-thread-";
private static final int THREAD_POOL_SIZE
= Configuration.getGlobalConfiguration().get("azure.sdk.threadPoolSize", config -> {
try {
int size = Integer.parseInt(config);
if (size <= 0) {
return 10 * Runtime.getRuntime().availableProcessors();
} else {
return size;
}
} catch (NumberFormatException ignored) {
return 10 * Runtime.getRuntime().availableProcessors();
}
});
private static final boolean VIRTUAL_THREAD_SUPPORTED;
private static final ReflectiveInvoker GET_VIRTUAL_THREAD_BUILDER;
private static final ReflectiveInvoker SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME;
private static final ReflectiveInvoker CREATE_VIRTUAL_THREAD_FACTORY;
static {
boolean virtualThreadSupported;
ReflectiveInvoker getVirtualThreadBuilder;
ReflectiveInvoker setVirtualThreadBuilderThreadName;
ReflectiveInvoker createVirtualThreadFactory;
try {
getVirtualThreadBuilder = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread").getDeclaredMethod("ofVirtual"));
setVirtualThreadBuilderThreadName = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("name", String.class));
createVirtualThreadFactory = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("factory"));
virtualThreadSupported = true;
} catch (Exception | LinkageError e) {
virtualThreadSupported = false;
getVirtualThreadBuilder = null;
setVirtualThreadBuilderThreadName = null;
createVirtualThreadFactory = null;
}
VIRTUAL_THREAD_SUPPORTED = virtualThreadSupported;
GET_VIRTUAL_THREAD_BUILDER = getVirtualThreadBuilder;
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME = setVirtualThreadBuilderThreadName;
CREATE_VIRTUAL_THREAD_FACTORY = createVirtualThreadFactory;
}
private final ExecutorService wrappedExecutorService;
private final boolean internal;
private SharedExecutorService(ExecutorService executorService, boolean internal) {
this.wrappedExecutorService = executorService;
this.internal = internal;
}
/**
* Gets the shared instance of the executor service.
*
* @return The shared instance of the executor service.
*/
public static SharedExecutorService getInstance() {
return INSTANCE.updateAndGet(instance -> {
if (instance == null) {
return new SharedExecutorService(createSharedExecutor(), true);
} else {
return instance;
}
});
}
/**
* Sets the shared instance using the passed executor service.
* <p>
* If the executor service is already set, this will replace it with the new executor service. If the replaced
* executor service was created by this class, it will be shut down.
* <p>
* If the passed executor service is null, this will throw a {@link NullPointerException}. If the passed executor
*
* @param executorService The executor service to set as the shared instance.
* @throws NullPointerException If the passed executor service is null.
* @throws IllegalStateException If the passed executor service is shutdown or terminated.
*/
public static void setInstance(ExecutorService executorService) {
Objects.requireNonNull(executorService, "'executorService' cannot be null.");
if (executorService.isShutdown() || executorService.isTerminated()) {
throw new IllegalStateException("The passed executor service is shutdown or terminated.");
}
SharedExecutorService existing = INSTANCE.getAndSet(new SharedExecutorService(executorService, false));
if (existing != null && existing.internal) {
existing.wrappedExecutorService.shutdown();
}
}
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*/
@Override
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @return Nothing will be returned as an exception will always be thrown.
*/
@Override
public List<Runnable> shutdownNow() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
}
@Override
public boolean isShutdown() {
return wrappedExecutorService.isShutdown();
}
@Override
public boolean isTerminated() {
return wrappedExecutorService.isTerminated();
}
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) {
return false;
}
@Override
public void execute(Runnable command) {
wrappedExecutorService.execute(command);
}
@Override
public <T> Future<T> submit(Callable<T> task) {
return wrappedExecutorService.submit(task);
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
return wrappedExecutorService.submit(task, result);
}
@Override
public Future<?> submit(Runnable task) {
return wrappedExecutorService.submit(task);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return wrappedExecutorService.invokeAll(tasks);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return wrappedExecutorService.invokeAll(tasks, timeout, unit);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return wrappedExecutorService.invokeAny(tasks);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return wrappedExecutorService.invokeAny(tasks, timeout, unit);
}
private static ExecutorService createSharedExecutor() {
ThreadFactory threadFactory;
if (VIRTUAL_THREAD_SUPPORTED) {
try {
threadFactory = createVirtualThreadFactory();
} catch (Exception e) {
threadFactory = createNonVirtualThreadFactory();
}
} else {
threadFactory = createNonVirtualThreadFactory();
}
ExecutorService sharedExecutor = CoreUtils.addShutdownHookSafely(
new ThreadPoolExecutor(0, THREAD_POOL_SIZE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(), threadFactory),
Duration.ofSeconds(5));
ReferenceManager.INSTANCE.register(sharedExecutor, sharedExecutor::shutdown);
return sharedExecutor;
}
private static ThreadFactory createVirtualThreadFactory() throws Exception {
Object virtualThreadBuilder = GET_VIRTUAL_THREAD_BUILDER.invokeStatic();
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME.invokeWithArguments(virtualThreadBuilder, AZURE_SDK_THREAD_NAME);
return (ThreadFactory) CREATE_VIRTUAL_THREAD_FACTORY.invokeWithArguments(virtualThreadBuilder);
}
private static ThreadFactory createNonVirtualThreadFactory() {
return r -> {
Thread thread = new Thread(r, AZURE_SDK_THREAD_NAME + AZURE_SDK_THREAD_COUNTER.getAndIncrement());
thread.setDaemon(true);
return thread;
};
}
} | class SharedExecutorService implements ExecutorService {
private static final ClientLogger LOGGER = new ClientLogger(SharedExecutorService.class);
private static final AtomicLong AZURE_SDK_THREAD_COUNTER = new AtomicLong();
private static final String AZURE_SDK_THREAD_NAME = "azure-sdk-global-thread-";
private static final int THREAD_POOL_SIZE;
private static final int THREAD_POOL_KEEP_ALIVE_MILLIS;
private static final boolean THREAD_POOL_VIRTUAL;
private static final SharedExecutorService INSTANCE;
static {
THREAD_POOL_SIZE
= getConfig("azure.sdk.shared.threadpool.maxpoolsize", "AZURE_SDK_SHARED_THREADPOOL_MAXPOOLSIZE",
Integer::parseInt, 10 * Runtime.getRuntime().availableProcessors());
THREAD_POOL_KEEP_ALIVE_MILLIS = getConfig("azure.sdk.shared.threadpool.keepalivemillis",
"AZURE_SDK_SHARED_THREADPOOL_KEEPALIVEMILLIS", Integer::parseInt, 60_000);
THREAD_POOL_VIRTUAL = getConfig("azure.sdk.shared.threadpool.usevirtualthreads",
"AZURE_SDK_SHARED_THREADPOOL_USEVIRTUALTHREADS", Boolean::parseBoolean, true);
INSTANCE = new SharedExecutorService();
}
private static <T> T getConfig(String systemProperty, String envVar, Function<String, T> converter,
T defaultValue) {
String foundValue = Configuration.getGlobalConfiguration()
.getFromEnvironment(systemProperty, envVar, ConfigurationProperty.REDACT_VALUE_SANITIZER);
if (foundValue == null) {
LOGGER.atVerbose()
.addKeyValue("systemProperty", systemProperty)
.addKeyValue("envVar", envVar)
.addKeyValue("defaultValue", defaultValue)
.log("Configuration value not found, using default.");
return defaultValue;
}
try {
T returnValue = converter.apply(foundValue);
LOGGER.atVerbose()
.addKeyValue("systemProperty", systemProperty)
.addKeyValue("envVar", envVar)
.addKeyValue("value", foundValue)
.log("Found configuration value.");
return returnValue;
} catch (RuntimeException e) {
LOGGER.atVerbose()
.addKeyValue("systemProperty", systemProperty)
.addKeyValue("envVar", envVar)
.addKeyValue("value", foundValue)
.addKeyValue("defaultValue", defaultValue)
.log("Failed to convert found configuration value, using default.");
return defaultValue;
}
}
private static final boolean VIRTUAL_THREAD_SUPPORTED;
private static final ReflectiveInvoker GET_VIRTUAL_THREAD_BUILDER;
private static final ReflectiveInvoker SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME;
private static final ReflectiveInvoker CREATE_VIRTUAL_THREAD_FACTORY;
static {
boolean virtualThreadSupported;
ReflectiveInvoker getVirtualThreadBuilder;
ReflectiveInvoker setVirtualThreadBuilderThreadName;
ReflectiveInvoker createVirtualThreadFactory;
try {
getVirtualThreadBuilder = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread").getDeclaredMethod("ofVirtual"));
setVirtualThreadBuilderThreadName = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("name", String.class));
createVirtualThreadFactory = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("factory"));
virtualThreadSupported = true;
LOGGER.verbose("Virtual threads are supported in the current runtime.");
} catch (Exception | LinkageError e) {
LOGGER.atVerbose()
.addKeyValue("runtime", System.getProperty("java.version"))
.log("Virtual threads are not supported in the current runtime.", e);
virtualThreadSupported = false;
getVirtualThreadBuilder = null;
setVirtualThreadBuilderThreadName = null;
createVirtualThreadFactory = null;
}
VIRTUAL_THREAD_SUPPORTED = virtualThreadSupported;
GET_VIRTUAL_THREAD_BUILDER = getVirtualThreadBuilder;
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME = setVirtualThreadBuilderThreadName;
CREATE_VIRTUAL_THREAD_FACTORY = createVirtualThreadFactory;
}
private final ExecutorService executorService;
private SharedExecutorService() {
this.executorService = createSharedExecutor();
}
/**
* Gets the shared instance of the executor service.
*
* @return The shared instance of the executor service.
*/
public static SharedExecutorService getInstance() {
return INSTANCE;
}
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @throws UnsupportedOperationException This method will always throw an exception.
*/
@Override
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @return Nothing will be returned as an exception will always be thrown.
* @throws UnsupportedOperationException This method will always throw an exception.
*/
@Override
public List<Runnable> shutdownNow() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
}
/**
* Checks if the executor service is shutdown.
* <p>
* Will always return false as the shared executor service cannot be shut down.
*
* @return False, as the shared executor service cannot be shut down.
*/
@Override
public boolean isShutdown() {
return false;
}
/**
* Checks if the executor service is terminated.
* <p>
* Will always return false as the shared executor service cannot be terminated.
*
* @return False, as the shared executor service cannot be terminated.
*/
@Override
public boolean isTerminated() {
return false;
}
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @param timeout The amount of time to wait for the executor service to shutdown.
* @param unit The unit of time for the timeout.
* @return Nothing will be returned as an exception will always be thrown.
* @throws UnsupportedOperationException This method will always throw an exception.
*/
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be terminated."));
}
@Override
public void execute(Runnable command) {
ensureNotShutdown().execute(command);
}
@Override
public <T> Future<T> submit(Callable<T> task) {
return ensureNotShutdown().submit(task);
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
return ensureNotShutdown().submit(task, result);
}
@Override
public Future<?> submit(Runnable task) {
return ensureNotShutdown().submit(task);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return ensureNotShutdown().invokeAll(tasks);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return ensureNotShutdown().invokeAll(tasks, timeout, unit);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return ensureNotShutdown().invokeAny(tasks);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return ensureNotShutdown().invokeAny(tasks, timeout, unit);
}
private ExecutorService ensureNotShutdown() {
return executorService;
}
private static ExecutorService createSharedExecutor() {
ThreadFactory threadFactory;
if (VIRTUAL_THREAD_SUPPORTED && THREAD_POOL_VIRTUAL) {
try {
LOGGER.verbose("Attempting to create a virtual thread factory.");
threadFactory = createVirtualThreadFactory();
LOGGER.verbose("Successfully created a virtual thread factory.");
} catch (Exception e) {
LOGGER.info("Failed to create a virtual thread factory, falling back to non-virtual threads.", e);
threadFactory = createNonVirtualThreadFactory();
}
} else {
threadFactory = createNonVirtualThreadFactory();
}
ExecutorService executorService = new ThreadPoolExecutor(0, THREAD_POOL_SIZE, THREAD_POOL_KEEP_ALIVE_MILLIS,
TimeUnit.MILLISECONDS, new SynchronousQueue<>(), threadFactory);
Thread shutdownThread = CoreUtils.createExecutorServiceShutdownThread(executorService, Duration.ofSeconds(5));
CoreUtils.addShutdownHookSafely(shutdownThread);
return executorService;
}
private static ThreadFactory createVirtualThreadFactory() throws Exception {
Object virtualThreadBuilder = GET_VIRTUAL_THREAD_BUILDER.invokeStatic();
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME.invokeWithArguments(virtualThreadBuilder, AZURE_SDK_THREAD_NAME);
return (ThreadFactory) CREATE_VIRTUAL_THREAD_FACTORY.invokeWithArguments(virtualThreadBuilder);
}
private static ThreadFactory createNonVirtualThreadFactory() {
return r -> {
Thread thread = new Thread(r, AZURE_SDK_THREAD_NAME + AZURE_SDK_THREAD_COUNTER.getAndIncrement());
thread.setDaemon(true);
return thread;
};
}
} |
I made it so this `ExecutorService` couldn't be shut down as it was meant to be shared across multiple clients, none of them having exclusive ownership. And maybe this is a spot for an overall change in direction base on the other comment where we instead allow for our synchronous clients to be constructor with an `ExecutorService`. | public void shutdown() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
} | throw LOGGER.logThrowableAsError( | public void shutdown() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
} | class SharedExecutorService implements ExecutorService {
private static final ClientLogger LOGGER = new ClientLogger(SharedExecutorService.class);
private static final AtomicReference<SharedExecutorService> INSTANCE = new AtomicReference<>();
private static final AtomicLong AZURE_SDK_THREAD_COUNTER = new AtomicLong();
private static final String AZURE_SDK_THREAD_NAME = "azure-sdk-global-thread-";
private static final int THREAD_POOL_SIZE
= Configuration.getGlobalConfiguration().get("azure.sdk.threadPoolSize", config -> {
try {
int size = Integer.parseInt(config);
if (size <= 0) {
return 10 * Runtime.getRuntime().availableProcessors();
} else {
return size;
}
} catch (NumberFormatException ignored) {
return 10 * Runtime.getRuntime().availableProcessors();
}
});
private static final boolean VIRTUAL_THREAD_SUPPORTED;
private static final ReflectiveInvoker GET_VIRTUAL_THREAD_BUILDER;
private static final ReflectiveInvoker SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME;
private static final ReflectiveInvoker CREATE_VIRTUAL_THREAD_FACTORY;
static {
boolean virtualThreadSupported;
ReflectiveInvoker getVirtualThreadBuilder;
ReflectiveInvoker setVirtualThreadBuilderThreadName;
ReflectiveInvoker createVirtualThreadFactory;
try {
getVirtualThreadBuilder = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread").getDeclaredMethod("ofVirtual"));
setVirtualThreadBuilderThreadName = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("name", String.class));
createVirtualThreadFactory = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("factory"));
virtualThreadSupported = true;
} catch (Exception | LinkageError e) {
virtualThreadSupported = false;
getVirtualThreadBuilder = null;
setVirtualThreadBuilderThreadName = null;
createVirtualThreadFactory = null;
}
VIRTUAL_THREAD_SUPPORTED = virtualThreadSupported;
GET_VIRTUAL_THREAD_BUILDER = getVirtualThreadBuilder;
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME = setVirtualThreadBuilderThreadName;
CREATE_VIRTUAL_THREAD_FACTORY = createVirtualThreadFactory;
}
private final ExecutorService wrappedExecutorService;
private final boolean internal;
private SharedExecutorService(ExecutorService executorService, boolean internal) {
this.wrappedExecutorService = executorService;
this.internal = internal;
}
/**
* Gets the shared instance of the executor service.
*
* @return The shared instance of the executor service.
*/
public static SharedExecutorService getInstance() {
return INSTANCE.updateAndGet(instance -> {
if (instance == null) {
return new SharedExecutorService(createSharedExecutor(), true);
} else {
return instance;
}
});
}
/**
* Sets the shared instance using the passed executor service.
* <p>
* If the executor service is already set, this will replace it with the new executor service. If the replaced
* executor service was created by this class, it will be shut down.
* <p>
* If the passed executor service is null, this will throw a {@link NullPointerException}. If the passed executor
*
* @param executorService The executor service to set as the shared instance.
* @throws NullPointerException If the passed executor service is null.
* @throws IllegalStateException If the passed executor service is shutdown or terminated.
*/
public static void setInstance(ExecutorService executorService) {
Objects.requireNonNull(executorService, "'executorService' cannot be null.");
if (executorService.isShutdown() || executorService.isTerminated()) {
throw new IllegalStateException("The passed executor service is shutdown or terminated.");
}
SharedExecutorService existing = INSTANCE.getAndSet(new SharedExecutorService(executorService, false));
if (existing != null && existing.internal) {
existing.wrappedExecutorService.shutdown();
}
}
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*/
@Override
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @return Nothing will be returned as an exception will always be thrown.
*/
@Override
public List<Runnable> shutdownNow() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
}
@Override
public boolean isShutdown() {
return wrappedExecutorService.isShutdown();
}
@Override
public boolean isTerminated() {
return wrappedExecutorService.isTerminated();
}
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) {
return false;
}
@Override
public void execute(Runnable command) {
wrappedExecutorService.execute(command);
}
@Override
public <T> Future<T> submit(Callable<T> task) {
return wrappedExecutorService.submit(task);
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
return wrappedExecutorService.submit(task, result);
}
@Override
public Future<?> submit(Runnable task) {
return wrappedExecutorService.submit(task);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return wrappedExecutorService.invokeAll(tasks);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return wrappedExecutorService.invokeAll(tasks, timeout, unit);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return wrappedExecutorService.invokeAny(tasks);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return wrappedExecutorService.invokeAny(tasks, timeout, unit);
}
private static ExecutorService createSharedExecutor() {
ThreadFactory threadFactory;
if (VIRTUAL_THREAD_SUPPORTED) {
try {
threadFactory = createVirtualThreadFactory();
} catch (Exception e) {
threadFactory = createNonVirtualThreadFactory();
}
} else {
threadFactory = createNonVirtualThreadFactory();
}
ExecutorService sharedExecutor = CoreUtils.addShutdownHookSafely(
new ThreadPoolExecutor(0, THREAD_POOL_SIZE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(), threadFactory),
Duration.ofSeconds(5));
ReferenceManager.INSTANCE.register(sharedExecutor, sharedExecutor::shutdown);
return sharedExecutor;
}
private static ThreadFactory createVirtualThreadFactory() throws Exception {
Object virtualThreadBuilder = GET_VIRTUAL_THREAD_BUILDER.invokeStatic();
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME.invokeWithArguments(virtualThreadBuilder, AZURE_SDK_THREAD_NAME);
return (ThreadFactory) CREATE_VIRTUAL_THREAD_FACTORY.invokeWithArguments(virtualThreadBuilder);
}
private static ThreadFactory createNonVirtualThreadFactory() {
return r -> {
Thread thread = new Thread(r, AZURE_SDK_THREAD_NAME + AZURE_SDK_THREAD_COUNTER.getAndIncrement());
thread.setDaemon(true);
return thread;
};
}
} | class SharedExecutorService implements ExecutorService {
private static final ClientLogger LOGGER = new ClientLogger(SharedExecutorService.class);
private static final AtomicLong AZURE_SDK_THREAD_COUNTER = new AtomicLong();
private static final String AZURE_SDK_THREAD_NAME = "azure-sdk-global-thread-";
private static final int THREAD_POOL_SIZE;
private static final int THREAD_POOL_KEEP_ALIVE_MILLIS;
private static final boolean THREAD_POOL_VIRTUAL;
private static final SharedExecutorService INSTANCE;
static {
THREAD_POOL_SIZE
= getConfig("azure.sdk.shared.threadpool.maxpoolsize", "AZURE_SDK_SHARED_THREADPOOL_MAXPOOLSIZE",
Integer::parseInt, 10 * Runtime.getRuntime().availableProcessors());
THREAD_POOL_KEEP_ALIVE_MILLIS = getConfig("azure.sdk.shared.threadpool.keepalivemillis",
"AZURE_SDK_SHARED_THREADPOOL_KEEPALIVEMILLIS", Integer::parseInt, 60_000);
THREAD_POOL_VIRTUAL = getConfig("azure.sdk.shared.threadpool.usevirtualthreads",
"AZURE_SDK_SHARED_THREADPOOL_USEVIRTUALTHREADS", Boolean::parseBoolean, true);
INSTANCE = new SharedExecutorService();
}
private static <T> T getConfig(String systemProperty, String envVar, Function<String, T> converter,
T defaultValue) {
String foundValue = Configuration.getGlobalConfiguration()
.getFromEnvironment(systemProperty, envVar, ConfigurationProperty.REDACT_VALUE_SANITIZER);
if (foundValue == null) {
LOGGER.atVerbose()
.addKeyValue("systemProperty", systemProperty)
.addKeyValue("envVar", envVar)
.addKeyValue("defaultValue", defaultValue)
.log("Configuration value not found, using default.");
return defaultValue;
}
try {
T returnValue = converter.apply(foundValue);
LOGGER.atVerbose()
.addKeyValue("systemProperty", systemProperty)
.addKeyValue("envVar", envVar)
.addKeyValue("value", foundValue)
.log("Found configuration value.");
return returnValue;
} catch (RuntimeException e) {
LOGGER.atVerbose()
.addKeyValue("systemProperty", systemProperty)
.addKeyValue("envVar", envVar)
.addKeyValue("value", foundValue)
.addKeyValue("defaultValue", defaultValue)
.log("Failed to convert found configuration value, using default.");
return defaultValue;
}
}
private static final boolean VIRTUAL_THREAD_SUPPORTED;
private static final ReflectiveInvoker GET_VIRTUAL_THREAD_BUILDER;
private static final ReflectiveInvoker SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME;
private static final ReflectiveInvoker CREATE_VIRTUAL_THREAD_FACTORY;
static {
boolean virtualThreadSupported;
ReflectiveInvoker getVirtualThreadBuilder;
ReflectiveInvoker setVirtualThreadBuilderThreadName;
ReflectiveInvoker createVirtualThreadFactory;
try {
getVirtualThreadBuilder = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread").getDeclaredMethod("ofVirtual"));
setVirtualThreadBuilderThreadName = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("name", String.class));
createVirtualThreadFactory = ReflectionUtils.getMethodInvoker(null,
Class.forName("java.lang.Thread$Builder").getDeclaredMethod("factory"));
virtualThreadSupported = true;
LOGGER.verbose("Virtual threads are supported in the current runtime.");
} catch (Exception | LinkageError e) {
LOGGER.atVerbose()
.addKeyValue("runtime", System.getProperty("java.version"))
.log("Virtual threads are not supported in the current runtime.", e);
virtualThreadSupported = false;
getVirtualThreadBuilder = null;
setVirtualThreadBuilderThreadName = null;
createVirtualThreadFactory = null;
}
VIRTUAL_THREAD_SUPPORTED = virtualThreadSupported;
GET_VIRTUAL_THREAD_BUILDER = getVirtualThreadBuilder;
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME = setVirtualThreadBuilderThreadName;
CREATE_VIRTUAL_THREAD_FACTORY = createVirtualThreadFactory;
}
private final ExecutorService executorService;
private SharedExecutorService() {
this.executorService = createSharedExecutor();
}
/**
* Gets the shared instance of the executor service.
*
* @return The shared instance of the executor service.
*/
public static SharedExecutorService getInstance() {
return INSTANCE;
}
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @throws UnsupportedOperationException This method will always throw an exception.
*/
@Override
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @return Nothing will be returned as an exception will always be thrown.
* @throws UnsupportedOperationException This method will always throw an exception.
*/
@Override
public List<Runnable> shutdownNow() {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be shut down."));
}
/**
* Checks if the executor service is shutdown.
* <p>
* Will always return false as the shared executor service cannot be shut down.
*
* @return False, as the shared executor service cannot be shut down.
*/
@Override
public boolean isShutdown() {
return false;
}
/**
* Checks if the executor service is terminated.
* <p>
* Will always return false as the shared executor service cannot be terminated.
*
* @return False, as the shared executor service cannot be terminated.
*/
@Override
public boolean isTerminated() {
return false;
}
/**
* Shutdown isn't supported for this executor service as it is shared by multiple consumers.
* <p>
* Calling this method will result in an {@link UnsupportedOperationException} being thrown.
*
* @param timeout The amount of time to wait for the executor service to shutdown.
* @param unit The unit of time for the timeout.
* @return Nothing will be returned as an exception will always be thrown.
* @throws UnsupportedOperationException This method will always throw an exception.
*/
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) {
throw LOGGER.logThrowableAsError(
new UnsupportedOperationException("This executor service is shared and cannot be terminated."));
}
@Override
public void execute(Runnable command) {
ensureNotShutdown().execute(command);
}
@Override
public <T> Future<T> submit(Callable<T> task) {
return ensureNotShutdown().submit(task);
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
return ensureNotShutdown().submit(task, result);
}
@Override
public Future<?> submit(Runnable task) {
return ensureNotShutdown().submit(task);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return ensureNotShutdown().invokeAll(tasks);
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return ensureNotShutdown().invokeAll(tasks, timeout, unit);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return ensureNotShutdown().invokeAny(tasks);
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return ensureNotShutdown().invokeAny(tasks, timeout, unit);
}
private ExecutorService ensureNotShutdown() {
return executorService;
}
private static ExecutorService createSharedExecutor() {
ThreadFactory threadFactory;
if (VIRTUAL_THREAD_SUPPORTED && THREAD_POOL_VIRTUAL) {
try {
LOGGER.verbose("Attempting to create a virtual thread factory.");
threadFactory = createVirtualThreadFactory();
LOGGER.verbose("Successfully created a virtual thread factory.");
} catch (Exception e) {
LOGGER.info("Failed to create a virtual thread factory, falling back to non-virtual threads.", e);
threadFactory = createNonVirtualThreadFactory();
}
} else {
threadFactory = createNonVirtualThreadFactory();
}
ExecutorService executorService = new ThreadPoolExecutor(0, THREAD_POOL_SIZE, THREAD_POOL_KEEP_ALIVE_MILLIS,
TimeUnit.MILLISECONDS, new SynchronousQueue<>(), threadFactory);
Thread shutdownThread = CoreUtils.createExecutorServiceShutdownThread(executorService, Duration.ofSeconds(5));
CoreUtils.addShutdownHookSafely(shutdownThread);
return executorService;
}
private static ThreadFactory createVirtualThreadFactory() throws Exception {
Object virtualThreadBuilder = GET_VIRTUAL_THREAD_BUILDER.invokeStatic();
SET_VIRTUAL_THREAD_BUILDER_THREAD_NAME.invokeWithArguments(virtualThreadBuilder, AZURE_SDK_THREAD_NAME);
return (ThreadFactory) CREATE_VIRTUAL_THREAD_FACTORY.invokeWithArguments(virtualThreadBuilder);
}
private static ThreadFactory createNonVirtualThreadFactory() {
return r -> {
Thread thread = new Thread(r, AZURE_SDK_THREAD_NAME + AZURE_SDK_THREAD_COUNTER.getAndIncrement());
thread.setDaemon(true);
return thread;
};
}
} |
This should check for `startsWith` instead of `equals` as sometimes the service returns `text/event-stream; charset=utf8` as the content type. | private static boolean isTextEventStream(okhttp3.Headers responseHeaders) {
return responseHeaders != null && responseHeaders.get(HeaderName.CONTENT_TYPE.toString()) != null &&
Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM);
} | Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM); | private static boolean isTextEventStream(okhttp3.Headers responseHeaders) {
if (responseHeaders != null) {
return ServerSentEventUtil
.isTextEventStreamContentType(responseHeaders.get(HttpHeaderName.CONTENT_TYPE.toString()));
}
return false;
} | class OkHttpHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
final OkHttpClient httpClient;
OkHttpHttpClient(OkHttpClient httpClient) {
this.httpClient = httpClient;
}
@Override
public Response<?> send(HttpRequest request) {
boolean eagerlyConvertHeaders = request.getMetadata().isEagerlyConvertHeaders();
boolean eagerlyReadResponse = request.getMetadata().isEagerlyReadResponse();
boolean ignoreResponseBody = request.getMetadata().isIgnoreResponseBody();
Request okHttpRequest = toOkHttpRequest(request);
try {
okhttp3.Response okHttpResponse = httpClient.newCall(okHttpRequest).execute();
return toResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Converts the given generic-core request to okhttp request.
*
* @param request the generic-core request.
*
* @return Th eOkHttp request.
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request) {
Request.Builder requestBuilder = new Request.Builder()
.url(request.getUrl());
if (request.getHeaders() != null) {
for (Header hdr : request.getHeaders()) {
hdr.getValues().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBody(), request.getHeaders());
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody).build();
}
/**
* Create a Mono of okhttp3.RequestBody from the given BinaryData.
*
* @param bodyContent The request body content
* @param headers the headers associated with the original request
*
* @return The Mono emitting okhttp request
*/
private RequestBody toOkHttpRequestBody(BinaryData bodyContent, Headers headers) {
if (bodyContent == null) {
return EMPTY_REQUEST_BODY;
}
String contentType = headers.getValue(HeaderName.CONTENT_TYPE);
MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
if (bodyContent instanceof InputStreamBinaryData) {
long effectiveContentLength = getRequestContentLength(bodyContent, headers);
return new OkHttpInputStreamRequestBody((InputStreamBinaryData) bodyContent, effectiveContentLength,
mediaType);
} else if (bodyContent instanceof FileBinaryData) {
long effectiveContentLength = getRequestContentLength(bodyContent, headers);
return new OkHttpFileRequestBody((FileBinaryData) bodyContent, effectiveContentLength, mediaType);
} else {
return RequestBody.create(bodyContent.toBytes(), mediaType);
}
}
private static long getRequestContentLength(BinaryData content, Headers headers) {
Long contentLength = content.getLength();
if (contentLength == null) {
String contentLengthHeaderValue = headers.getValue(HeaderName.CONTENT_LENGTH);
if (contentLengthHeaderValue != null) {
contentLength = Long.parseLong(contentLengthHeaderValue);
} else {
contentLength = -1L;
}
}
return contentLength;
}
private Response<?> toResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
okhttp3.Headers responseHeaders = response.headers();
if (isTextEventStream(responseHeaders)) {
return processServerSentEvent(request, response, eagerlyConvertHeaders);
} else {
return processResponse(request, response, eagerlyReadResponse, ignoreResponseBody, eagerlyConvertHeaders);
}
}
private OkHttpResponse processServerSentEvent(HttpRequest request, okhttp3.Response response,
boolean eagerlyConvertHeaders) {
ServerSentEventListener listener = request.getServerSentEventListener();
if (listener != null && response.body() != null) {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.body().byteStream(),
StandardCharsets.UTF_8))) {
RetrySSEResult retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null && !retryExceptionForSSE(retrySSEResult, listener, request)
&& !Thread.currentThread().isInterrupted()) {
this.send(request);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
} else {
LOGGER.atInfo().log(() -> NO_LISTENER_LOG_MESSAGE);
}
return new OkHttpResponse(response, request, eagerlyConvertHeaders, EMPTY_BODY);
}
private Response<?> processResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
if (eagerlyReadResponse || ignoreResponseBody) {
try (ResponseBody body = response.body()) {
byte[] bytes = (body != null) ? body.bytes() : EMPTY_BODY;
return new OkHttpResponse(response, request, eagerlyConvertHeaders, bytes);
}
} else {
return new OkHttpResponse(response, request, eagerlyConvertHeaders, null);
}
}
} | class OkHttpHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
final OkHttpClient httpClient;
OkHttpHttpClient(OkHttpClient httpClient) {
this.httpClient = httpClient;
}
@Override
public Response<?> send(HttpRequest request) {
boolean eagerlyConvertHeaders = request.getMetadata().isEagerlyConvertHeaders();
boolean eagerlyReadResponse = request.getMetadata().isEagerlyReadResponse();
boolean ignoreResponseBody = request.getMetadata().isIgnoreResponseBody();
Request okHttpRequest = toOkHttpRequest(request);
try {
okhttp3.Response okHttpResponse = httpClient.newCall(okHttpRequest).execute();
return toResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Converts the given generic-core request to okhttp request.
*
* @param request the generic-core request.
*
* @return Th eOkHttp request.
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request) {
Request.Builder requestBuilder = new Request.Builder()
.url(request.getUrl());
if (request.getHeaders() != null) {
for (HttpHeader hdr : request.getHeaders()) {
hdr.getValues().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBody(), request.getHeaders());
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody).build();
}
/**
* Create a Mono of okhttp3.RequestBody from the given BinaryData.
*
* @param bodyContent The request body content
* @param headers the headers associated with the original request
*
* @return The Mono emitting okhttp request
*/
private RequestBody toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
if (bodyContent == null) {
return EMPTY_REQUEST_BODY;
}
String contentType = headers.getValue(HttpHeaderName.CONTENT_TYPE);
MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
if (bodyContent instanceof InputStreamBinaryData) {
long effectiveContentLength = getRequestContentLength(bodyContent, headers);
return new OkHttpInputStreamRequestBody((InputStreamBinaryData) bodyContent, effectiveContentLength,
mediaType);
} else if (bodyContent instanceof FileBinaryData) {
long effectiveContentLength = getRequestContentLength(bodyContent, headers);
return new OkHttpFileRequestBody((FileBinaryData) bodyContent, effectiveContentLength, mediaType);
} else {
return RequestBody.create(bodyContent.toBytes(), mediaType);
}
}
private static long getRequestContentLength(BinaryData content, HttpHeaders headers) {
Long contentLength = content.getLength();
if (contentLength == null) {
String contentLengthHeaderValue = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLengthHeaderValue != null) {
contentLength = Long.parseLong(contentLengthHeaderValue);
} else {
contentLength = -1L;
}
}
return contentLength;
}
private Response<?> toResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
okhttp3.Headers responseHeaders = response.headers();
if (isTextEventStream(responseHeaders) && response.body() != null) {
ServerSentEventListener listener = request.getServerSentEventListener();
if (listener != null) {
processTextEventStream(request,
httpRequest -> this.send(httpRequest), response.body().byteStream(), listener, LOGGER);
} else {
throw LOGGER.logThrowableAsError(new RuntimeException(ServerSentEventUtil.NO_LISTENER_ERROR_MESSAGE));
}
return new OkHttpResponse(response, request, eagerlyConvertHeaders, EMPTY_BODY);
}
return processResponse(request, response, eagerlyReadResponse, ignoreResponseBody, eagerlyConvertHeaders);
}
private Response<?> processResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
if (eagerlyReadResponse || ignoreResponseBody) {
try (ResponseBody body = response.body()) {
byte[] bytes = (body != null) ? body.bytes() : EMPTY_BODY;
return new OkHttpResponse(response, request, eagerlyConvertHeaders, bytes);
}
} else {
return new OkHttpResponse(response, request, eagerlyConvertHeaders, null);
}
}
} |
Since this is internal and the use cases are limited, maybe convert to OffsetDateTime on creation? | public OffsetDateTime getTokenExpiration() {
OffsetDateTime tokenExpiry;
if (getExpiresOnUnixTime() != null) {
tokenExpiry = EPOCH.plusSeconds(getExpiresOnUnixTime());
} else {
tokenExpiry = parseExpiresOnTime(getExpiresOn());
}
return tokenExpiry;
} | } | public OffsetDateTime getTokenExpiration() {
return tokenExpiry;
} | class AzureCliToken implements JsonSerializable<AzureCliToken> {
private static final OffsetDateTime EPOCH = OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
private String accessToken;
private String expiresOn;
private Long expiresOnUnixTime;
private String subscription;
private String tenant;
private String tokenType;
public AzureCliToken accessToken(String accessToken) {
this.accessToken = accessToken;
return this;
}
public AzureCliToken expiresOn(String expiresOn) {
this.expiresOn = expiresOn;
return this;
}
public AzureCliToken expiresOnUnixTime(Long expiresOnUnixTime) {
this.expiresOnUnixTime = expiresOnUnixTime;
return this;
}
public AzureCliToken subscription(String subscription) {
this.subscription = subscription;
return this;
}
public AzureCliToken tenant(String tenant) {
this.tenant = tenant;
return this;
}
public AzureCliToken tokenType(String tokenType) {
this.tokenType = tokenType;
return this;
}
public String getAccessToken() {
return accessToken;
}
public String getExpiresOn() {
return expiresOn;
}
public Long getExpiresOnUnixTime() {
return expiresOnUnixTime;
}
public String getSubscription() {
return subscription;
}
public String getTenant() {
return tenant;
}
public String getTokenType() {
return tokenType;
}
private OffsetDateTime parseExpiresOnTime(String time) {
OffsetDateTime tokenExpiry;
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
tokenExpiry = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
return tokenExpiry;
}
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("accessToken", accessToken);
jsonWriter.writeStringField("expiresOn", expiresOn);
jsonWriter.writeNumberField("expiresOnUnixTime", expiresOnUnixTime);
jsonWriter.writeStringField("subscription", subscription);
jsonWriter.writeStringField("tenant", tenant);
jsonWriter.writeStringField("tokenType", tokenType);
jsonWriter.writeEndObject();
return jsonWriter;
}
public static AzureCliToken fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
AzureCliToken tokenHolder = new AzureCliToken();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("accessToken".equals(fieldName)) {
tokenHolder.accessToken(reader.getString());
} else if ("expiresOn".equals(fieldName)) {
tokenHolder.expiresOn(reader.getString());
} else if ("expires_on".equals(fieldName)) {
tokenHolder.expiresOnUnixTime(reader.getLong());
} else if ("subscription".equals(fieldName)) {
tokenHolder.subscription(reader.getString());
} else if ("tenant".equals(fieldName)) {
tokenHolder.tenant(reader.getString());
} else if ("tokenType".equals(fieldName)) {
tokenHolder.tokenType(reader.getString());
} else {
reader.skipChildren();
}
}
return tokenHolder;
});
}
} | class AzureCliToken implements JsonSerializable<AzureCliToken> {
private String accessToken;
private String expiresOn;
private Long expiresOnUnixTime;
private String subscription;
private String tenant;
private String tokenType;
private OffsetDateTime tokenExpiry;
public String getAccessToken() {
return accessToken;
}
public String getExpiresOn() {
return expiresOn;
}
public Long getExpiresOnUnixTime() {
return expiresOnUnixTime;
}
public String getSubscription() {
return subscription;
}
public String getTenant() {
return tenant;
}
public String getTokenType() {
return tokenType;
}
private static OffsetDateTime parseExpiresOnTime(String time) {
OffsetDateTime tokenExpiry;
tokenExpiry = LocalDateTime.parse(time, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS"))
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
return tokenExpiry;
}
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("accessToken", accessToken);
jsonWriter.writeStringField("expiresOn", expiresOn);
jsonWriter.writeNumberField("expires_on", expiresOnUnixTime);
jsonWriter.writeStringField("subscription", subscription);
jsonWriter.writeStringField("tenant", tenant);
jsonWriter.writeStringField("tokenType", tokenType);
jsonWriter.writeEndObject();
return jsonWriter;
}
public static AzureCliToken fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
AzureCliToken tokenHolder = new AzureCliToken();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("accessToken".equals(fieldName)) {
tokenHolder.accessToken = reader.getString();
} else if ("expiresOn".equals(fieldName)) {
tokenHolder.expiresOn = reader.getString();
} else if ("expires_on".equals(fieldName)) {
tokenHolder.expiresOnUnixTime = reader.getLong();
} else if ("subscription".equals(fieldName)) {
tokenHolder.subscription = reader.getString();
} else if ("tenant".equals(fieldName)) {
tokenHolder.tenant = reader.getString();
} else if ("tokenType".equals(fieldName)) {
tokenHolder.tokenType = reader.getString();
} else {
reader.skipChildren();
}
}
if (tokenHolder.expiresOnUnixTime != null) {
tokenHolder.tokenExpiry = Instant.ofEpochSecond(tokenHolder.getExpiresOnUnixTime()).atOffset(ZoneOffset.UTC);
} else {
tokenHolder.tokenExpiry = parseExpiresOnTime(tokenHolder.getExpiresOn());
}
return tokenHolder;
});
}
} |
Shouldn't this be `expires_on`? | public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("accessToken", accessToken);
jsonWriter.writeStringField("expiresOn", expiresOn);
jsonWriter.writeNumberField("expiresOnUnixTime", expiresOnUnixTime);
jsonWriter.writeStringField("subscription", subscription);
jsonWriter.writeStringField("tenant", tenant);
jsonWriter.writeStringField("tokenType", tokenType);
jsonWriter.writeEndObject();
return jsonWriter;
} | jsonWriter.writeNumberField("expiresOnUnixTime", expiresOnUnixTime); | public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("accessToken", accessToken);
jsonWriter.writeStringField("expiresOn", expiresOn);
jsonWriter.writeNumberField("expires_on", expiresOnUnixTime);
jsonWriter.writeStringField("subscription", subscription);
jsonWriter.writeStringField("tenant", tenant);
jsonWriter.writeStringField("tokenType", tokenType);
jsonWriter.writeEndObject();
return jsonWriter;
} | class AzureCliToken implements JsonSerializable<AzureCliToken> {
private static final OffsetDateTime EPOCH = OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
private String accessToken;
private String expiresOn;
private Long expiresOnUnixTime;
private String subscription;
private String tenant;
private String tokenType;
public AzureCliToken accessToken(String accessToken) {
this.accessToken = accessToken;
return this;
}
public AzureCliToken expiresOn(String expiresOn) {
this.expiresOn = expiresOn;
return this;
}
public AzureCliToken expiresOnUnixTime(Long expiresOnUnixTime) {
this.expiresOnUnixTime = expiresOnUnixTime;
return this;
}
public AzureCliToken subscription(String subscription) {
this.subscription = subscription;
return this;
}
public AzureCliToken tenant(String tenant) {
this.tenant = tenant;
return this;
}
public AzureCliToken tokenType(String tokenType) {
this.tokenType = tokenType;
return this;
}
public String getAccessToken() {
return accessToken;
}
public String getExpiresOn() {
return expiresOn;
}
public Long getExpiresOnUnixTime() {
return expiresOnUnixTime;
}
public String getSubscription() {
return subscription;
}
public String getTenant() {
return tenant;
}
public String getTokenType() {
return tokenType;
}
public OffsetDateTime getTokenExpiration() {
OffsetDateTime tokenExpiry;
if (getExpiresOnUnixTime() != null) {
tokenExpiry = EPOCH.plusSeconds(getExpiresOnUnixTime());
} else {
tokenExpiry = parseExpiresOnTime(getExpiresOn());
}
return tokenExpiry;
}
private OffsetDateTime parseExpiresOnTime(String time) {
OffsetDateTime tokenExpiry;
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
tokenExpiry = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
return tokenExpiry;
}
public static AzureCliToken fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
AzureCliToken tokenHolder = new AzureCliToken();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("accessToken".equals(fieldName)) {
tokenHolder.accessToken(reader.getString());
} else if ("expiresOn".equals(fieldName)) {
tokenHolder.expiresOn(reader.getString());
} else if ("expires_on".equals(fieldName)) {
tokenHolder.expiresOnUnixTime(reader.getLong());
} else if ("subscription".equals(fieldName)) {
tokenHolder.subscription(reader.getString());
} else if ("tenant".equals(fieldName)) {
tokenHolder.tenant(reader.getString());
} else if ("tokenType".equals(fieldName)) {
tokenHolder.tokenType(reader.getString());
} else {
reader.skipChildren();
}
}
return tokenHolder;
});
}
} | class AzureCliToken implements JsonSerializable<AzureCliToken> {
private String accessToken;
private String expiresOn;
private Long expiresOnUnixTime;
private String subscription;
private String tenant;
private String tokenType;
private OffsetDateTime tokenExpiry;
public String getAccessToken() {
return accessToken;
}
public String getExpiresOn() {
return expiresOn;
}
public Long getExpiresOnUnixTime() {
return expiresOnUnixTime;
}
public String getSubscription() {
return subscription;
}
public String getTenant() {
return tenant;
}
public String getTokenType() {
return tokenType;
}
public OffsetDateTime getTokenExpiration() {
return tokenExpiry;
}
private static OffsetDateTime parseExpiresOnTime(String time) {
OffsetDateTime tokenExpiry;
tokenExpiry = LocalDateTime.parse(time, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS"))
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
return tokenExpiry;
}
public static AzureCliToken fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
AzureCliToken tokenHolder = new AzureCliToken();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("accessToken".equals(fieldName)) {
tokenHolder.accessToken = reader.getString();
} else if ("expiresOn".equals(fieldName)) {
tokenHolder.expiresOn = reader.getString();
} else if ("expires_on".equals(fieldName)) {
tokenHolder.expiresOnUnixTime = reader.getLong();
} else if ("subscription".equals(fieldName)) {
tokenHolder.subscription = reader.getString();
} else if ("tenant".equals(fieldName)) {
tokenHolder.tenant = reader.getString();
} else if ("tokenType".equals(fieldName)) {
tokenHolder.tokenType = reader.getString();
} else {
reader.skipChildren();
}
}
if (tokenHolder.expiresOnUnixTime != null) {
tokenHolder.tokenExpiry = Instant.ofEpochSecond(tokenHolder.getExpiresOnUnixTime()).atOffset(ZoneOffset.UTC);
} else {
tokenHolder.tokenExpiry = parseExpiresOnTime(tokenHolder.getExpiresOn());
}
return tokenHolder;
});
}
} |
Yeah, good point. I also realized the setters are useless so I lost them. | public OffsetDateTime getTokenExpiration() {
OffsetDateTime tokenExpiry;
if (getExpiresOnUnixTime() != null) {
tokenExpiry = EPOCH.plusSeconds(getExpiresOnUnixTime());
} else {
tokenExpiry = parseExpiresOnTime(getExpiresOn());
}
return tokenExpiry;
} | } | public OffsetDateTime getTokenExpiration() {
return tokenExpiry;
} | class AzureCliToken implements JsonSerializable<AzureCliToken> {
private static final OffsetDateTime EPOCH = OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
private String accessToken;
private String expiresOn;
private Long expiresOnUnixTime;
private String subscription;
private String tenant;
private String tokenType;
public AzureCliToken accessToken(String accessToken) {
this.accessToken = accessToken;
return this;
}
public AzureCliToken expiresOn(String expiresOn) {
this.expiresOn = expiresOn;
return this;
}
public AzureCliToken expiresOnUnixTime(Long expiresOnUnixTime) {
this.expiresOnUnixTime = expiresOnUnixTime;
return this;
}
public AzureCliToken subscription(String subscription) {
this.subscription = subscription;
return this;
}
public AzureCliToken tenant(String tenant) {
this.tenant = tenant;
return this;
}
public AzureCliToken tokenType(String tokenType) {
this.tokenType = tokenType;
return this;
}
public String getAccessToken() {
return accessToken;
}
public String getExpiresOn() {
return expiresOn;
}
public Long getExpiresOnUnixTime() {
return expiresOnUnixTime;
}
public String getSubscription() {
return subscription;
}
public String getTenant() {
return tenant;
}
public String getTokenType() {
return tokenType;
}
private OffsetDateTime parseExpiresOnTime(String time) {
OffsetDateTime tokenExpiry;
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
tokenExpiry = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
return tokenExpiry;
}
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("accessToken", accessToken);
jsonWriter.writeStringField("expiresOn", expiresOn);
jsonWriter.writeNumberField("expiresOnUnixTime", expiresOnUnixTime);
jsonWriter.writeStringField("subscription", subscription);
jsonWriter.writeStringField("tenant", tenant);
jsonWriter.writeStringField("tokenType", tokenType);
jsonWriter.writeEndObject();
return jsonWriter;
}
public static AzureCliToken fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
AzureCliToken tokenHolder = new AzureCliToken();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("accessToken".equals(fieldName)) {
tokenHolder.accessToken(reader.getString());
} else if ("expiresOn".equals(fieldName)) {
tokenHolder.expiresOn(reader.getString());
} else if ("expires_on".equals(fieldName)) {
tokenHolder.expiresOnUnixTime(reader.getLong());
} else if ("subscription".equals(fieldName)) {
tokenHolder.subscription(reader.getString());
} else if ("tenant".equals(fieldName)) {
tokenHolder.tenant(reader.getString());
} else if ("tokenType".equals(fieldName)) {
tokenHolder.tokenType(reader.getString());
} else {
reader.skipChildren();
}
}
return tokenHolder;
});
}
} | class AzureCliToken implements JsonSerializable<AzureCliToken> {
private String accessToken;
private String expiresOn;
private Long expiresOnUnixTime;
private String subscription;
private String tenant;
private String tokenType;
private OffsetDateTime tokenExpiry;
public String getAccessToken() {
return accessToken;
}
public String getExpiresOn() {
return expiresOn;
}
public Long getExpiresOnUnixTime() {
return expiresOnUnixTime;
}
public String getSubscription() {
return subscription;
}
public String getTenant() {
return tenant;
}
public String getTokenType() {
return tokenType;
}
private static OffsetDateTime parseExpiresOnTime(String time) {
OffsetDateTime tokenExpiry;
tokenExpiry = LocalDateTime.parse(time, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS"))
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
return tokenExpiry;
}
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("accessToken", accessToken);
jsonWriter.writeStringField("expiresOn", expiresOn);
jsonWriter.writeNumberField("expires_on", expiresOnUnixTime);
jsonWriter.writeStringField("subscription", subscription);
jsonWriter.writeStringField("tenant", tenant);
jsonWriter.writeStringField("tokenType", tokenType);
jsonWriter.writeEndObject();
return jsonWriter;
}
public static AzureCliToken fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
AzureCliToken tokenHolder = new AzureCliToken();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("accessToken".equals(fieldName)) {
tokenHolder.accessToken = reader.getString();
} else if ("expiresOn".equals(fieldName)) {
tokenHolder.expiresOn = reader.getString();
} else if ("expires_on".equals(fieldName)) {
tokenHolder.expiresOnUnixTime = reader.getLong();
} else if ("subscription".equals(fieldName)) {
tokenHolder.subscription = reader.getString();
} else if ("tenant".equals(fieldName)) {
tokenHolder.tenant = reader.getString();
} else if ("tokenType".equals(fieldName)) {
tokenHolder.tokenType = reader.getString();
} else {
reader.skipChildren();
}
}
if (tokenHolder.expiresOnUnixTime != null) {
tokenHolder.tokenExpiry = Instant.ofEpochSecond(tokenHolder.getExpiresOnUnixTime()).atOffset(ZoneOffset.UTC);
} else {
tokenHolder.tokenExpiry = parseExpiresOnTime(tokenHolder.getExpiresOn());
}
return tokenHolder;
});
}
} |
No retry in this case, so exceptions should be logged | public static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir, LocalStorageStats localStorageStats) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule::shutdown);
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", false, true, " (telemetry will be lost)");
} else {
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
true,
false, " (telemetry will be stored to disk and retried)"),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
localStorageStats,
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | "Sending telemetry to the ingestion service", false, true, " (telemetry will be lost)"); | public static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir, LocalStorageStats localStorageStats) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule::shutdown);
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", true, " (telemetry will be lost)");
} else {
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
false,
""),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
localStorageStats,
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | class AzureMonitorHelper {
public static TelemetryItemExporter createStatsbeatTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir) {
TelemetryPipeline statsbeatTelemetryPipeline = new TelemetryPipeline(httpPipeline, null);
TelemetryPipelineListener statsbeatTelemetryPipelineListener;
if (tempDir == null) {
statsbeatTelemetryPipelineListener = new StatsbeatTelemetryPipelineListener(statsbeatModule::shutdown);
} else {
LocalStorageTelemetryPipelineListener localStorageTelemetryPipelineListener =
new LocalStorageTelemetryPipelineListener(
1,
TempDirs.getSubDir(tempDir, "statsbeat"),
statsbeatTelemetryPipeline,
LocalStorageStats.noop(),
true);
statsbeatTelemetryPipelineListener =
TelemetryPipelineListener.composite(
new StatsbeatTelemetryPipelineListener(
() -> {
statsbeatModule.shutdown();
localStorageTelemetryPipelineListener.shutdown();
}),
localStorageTelemetryPipelineListener);
}
return new TelemetryItemExporter(statsbeatTelemetryPipeline, statsbeatTelemetryPipelineListener);
}
private AzureMonitorHelper(){}
} | class AzureMonitorHelper {
public static TelemetryItemExporter createStatsbeatTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir) {
TelemetryPipeline statsbeatTelemetryPipeline = new TelemetryPipeline(httpPipeline, null);
TelemetryPipelineListener statsbeatTelemetryPipelineListener;
if (tempDir == null) {
statsbeatTelemetryPipelineListener = new StatsbeatTelemetryPipelineListener(statsbeatModule::shutdown);
} else {
LocalStorageTelemetryPipelineListener localStorageTelemetryPipelineListener =
new LocalStorageTelemetryPipelineListener(
1,
TempDirs.getSubDir(tempDir, "statsbeat"),
statsbeatTelemetryPipeline,
LocalStorageStats.noop(),
true);
statsbeatTelemetryPipelineListener =
TelemetryPipelineListener.composite(
new StatsbeatTelemetryPipelineListener(
() -> {
statsbeatModule.shutdown();
localStorageTelemetryPipelineListener.shutdown();
}),
localStorageTelemetryPipelineListener);
}
return new TelemetryItemExporter(statsbeatTelemetryPipeline, statsbeatTelemetryPipelineListener);
}
private AzureMonitorHelper(){}
} |
```suggestion ""), ``` | public static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir, LocalStorageStats localStorageStats) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule::shutdown);
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", true, " (telemetry will be lost)");
} else {
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
false,
" (telemetry will be stored to disk and retried)"),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
localStorageStats,
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | " (telemetry will be stored to disk and retried)"), | public static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir, LocalStorageStats localStorageStats) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule::shutdown);
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", true, " (telemetry will be lost)");
} else {
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
false,
""),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
localStorageStats,
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | class AzureMonitorHelper {
public static TelemetryItemExporter createStatsbeatTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir) {
TelemetryPipeline statsbeatTelemetryPipeline = new TelemetryPipeline(httpPipeline, null);
TelemetryPipelineListener statsbeatTelemetryPipelineListener;
if (tempDir == null) {
statsbeatTelemetryPipelineListener = new StatsbeatTelemetryPipelineListener(statsbeatModule::shutdown);
} else {
LocalStorageTelemetryPipelineListener localStorageTelemetryPipelineListener =
new LocalStorageTelemetryPipelineListener(
1,
TempDirs.getSubDir(tempDir, "statsbeat"),
statsbeatTelemetryPipeline,
LocalStorageStats.noop(),
true);
statsbeatTelemetryPipelineListener =
TelemetryPipelineListener.composite(
new StatsbeatTelemetryPipelineListener(
() -> {
statsbeatModule.shutdown();
localStorageTelemetryPipelineListener.shutdown();
}),
localStorageTelemetryPipelineListener);
}
return new TelemetryItemExporter(statsbeatTelemetryPipeline, statsbeatTelemetryPipelineListener);
}
private AzureMonitorHelper(){}
} | class AzureMonitorHelper {
public static TelemetryItemExporter createStatsbeatTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule, File tempDir) {
TelemetryPipeline statsbeatTelemetryPipeline = new TelemetryPipeline(httpPipeline, null);
TelemetryPipelineListener statsbeatTelemetryPipelineListener;
if (tempDir == null) {
statsbeatTelemetryPipelineListener = new StatsbeatTelemetryPipelineListener(statsbeatModule::shutdown);
} else {
LocalStorageTelemetryPipelineListener localStorageTelemetryPipelineListener =
new LocalStorageTelemetryPipelineListener(
1,
TempDirs.getSubDir(tempDir, "statsbeat"),
statsbeatTelemetryPipeline,
LocalStorageStats.noop(),
true);
statsbeatTelemetryPipelineListener =
TelemetryPipelineListener.composite(
new StatsbeatTelemetryPipelineListener(
() -> {
statsbeatModule.shutdown();
localStorageTelemetryPipelineListener.shutdown();
}),
localStorageTelemetryPipelineListener);
}
return new TelemetryItemExporter(statsbeatTelemetryPipeline, statsbeatTelemetryPipelineListener);
}
private AzureMonitorHelper(){}
} |
are there any other concurrency tests in SDKs our team owns that we should make the same test fix? | public void testConcurrentRequests() {
int numRequests = 100;
HttpClient client = new JdkHttpClientProvider().createInstance();
ParallelFlux<byte[]> responses = Flux.range(1, numRequests)
.parallel()
.runOn(Schedulers.boundedElastic())
.flatMap(ignored -> doRequest(client, "/long"))
.flatMap(response -> Mono.using(() -> response, HttpResponse::getBodyAsByteArray, HttpResponse::close));
StepVerifier.create(responses).thenConsumeWhile(response -> {
TestUtils.assertArraysEqual(LONG_BODY, response);
return true;
}).expectComplete().verify(Duration.ofSeconds(60));
} | .flatMap(response -> Mono.using(() -> response, HttpResponse::getBodyAsByteArray, HttpResponse::close)); | public void testConcurrentRequests() {
int numRequests = 100;
HttpClient client = new JdkHttpClientProvider().createInstance();
ParallelFlux<byte[]> responses = Flux.range(1, numRequests)
.parallel()
.runOn(Schedulers.boundedElastic())
.flatMap(ignored -> doRequest(client, "/long"))
.flatMap(response -> Mono.using(() -> response, HttpResponse::getBodyAsByteArray, HttpResponse::close));
StepVerifier.create(responses).thenConsumeWhile(response -> {
TestUtils.assertArraysEqual(LONG_BODY, response);
return true;
}).expectComplete().verify(Duration.ofSeconds(60));
} | class JdkHttpClientTests {
private static final StepVerifierOptions EMPTY_INITIAL_REQUEST_OPTIONS
= StepVerifierOptions.create().initialRequest(0);
private static final String SERVER_HTTP_URI = JdkHttpClientLocalTestServer.getServer().getHttpUri();
@Test
public void testFlowableResponseShortBodyAsByteArrayAsync() {
checkBodyReceived(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseShortBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseLongBodyAsByteArrayAsync() {
checkBodyReceived(LONG_BODY, "/long");
}
@Test
public void testResponseLongBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(LONG_BODY, "/long");
}
@Test
public void testBufferResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/long").buffer()) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testBufferedResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/long"));
try (HttpResponse response = client.sendSync(request, new Context("azure-eagerly-read-response", true))) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testMultipleSubscriptionsEmitsError() {
    // A cached response's body stream can only be consumed once; a second
    // subscription must fail rather than silently replay.
    Mono<byte[]> body = getResponse("/short").cache().flatMap(HttpResponse::getBodyAsByteArray);

    // First subscription drains the body successfully.
    StepVerifier.create(body)
        .assertNext(Assertions::assertNotNull)
        .expectComplete()
        .verify(Duration.ofSeconds(20));

    // Second subscription observes the already-consumed stream and errors.
    StepVerifier.create(body)
        .expectNextCount(0)
        .expectError(IllegalStateException.class)
        .verify(Duration.ofSeconds(20));
}
@Test
public void testMultipleGetBodyBytesSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
Mono<byte[]> responseBody = response.getBodyAsByteArray();
StepVerifier.create(responseBody)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(responseBody)
.expectNextCount(0)
.expectError(IOException.class)
.verify(Duration.ofSeconds(20));
}
}
@Test
@Timeout(20)
public void testMultipleGetBinaryDataSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500Returned() {
StepVerifier.create(getResponse("/error").flatMap(response -> {
assertEquals(500, response.getStatusCode());
return response.getBodyAsString();
})).expectNext("error").expectComplete().verify(Duration.ofSeconds(20));
}
@Test
@Timeout(20)
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500ReturnedSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/error")) {
assertEquals(500, response.getStatusCode());
assertEquals("error", response.getBodyAsString().block());
}
}
@Test
public void testFlowableBackpressure() {
    // Subscribe with zero initial demand and pull chunks explicitly to verify the
    // client honors Reactive Streams backpressure instead of flooding the subscriber.
    Flux<ByteBuffer> bodyChunks = getResponse("/long").flatMapMany(HttpResponse::getBody);
    StepVerifier.create(bodyChunks, EMPTY_INITIAL_REQUEST_OPTIONS)
        .expectNextCount(0) // nothing may be emitted before demand is signaled
        .thenRequest(1)
        .expectNextCount(1)
        .thenRequest(3)
        .expectNextCount(3)
        .thenRequest(Long.MAX_VALUE)
        .thenConsumeWhile(ByteBuffer::hasRemaining)
        .verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify();
}
@Test
public void testProgressReporterAsync() {
    // Verifies that an async POST reports cumulative upload progress through the
    // ProgressReporter attached to the request Context.
    HttpClient client = new JdkHttpClientProvider().createInstance();
    ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
    // Body is LONG_BODY followed by SHORT_BODY; Content-Length covers both chunks.
    HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
        .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
        .setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
    // Route every progress callback into the deque for later inspection.
    Contexts contexts = Contexts.with(Context.NONE)
        .setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
    StepVerifier.create(client.send(request, contexts.getContext())).expectNextCount(1).expectComplete().verify();
    List<Long> progressList = progress.stream().collect(Collectors.toList());
    // Progress values are cumulative: first the long chunk, then long + short.
    assertEquals(LONG_BODY.length, progressList.get(0));
    assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
@Test
public void testProgressReporterSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
try (HttpResponse response = client.sendSync(request, contexts.getContext())) {
assertEquals(200, response.getStatusCode());
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
}
@Test
public void testFileUploadSync() throws IOException {
Path tempFile = writeToTempFile(LONG_BODY);
tempFile.toFile().deleteOnExit();
BinaryData body = BinaryData.fromFile(tempFile, 1L, 42L);
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation")).setBody(body);
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
public void testStreamUploadAsync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
InputStream requestBody = new ByteArrayInputStream(LONG_BODY, 1, 42);
BinaryData body = BinaryData.fromStream(requestBody, 42L);
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, "42")
.setBody(body);
StepVerifier.create(client.send(request))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
try {
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify(Duration.ofSeconds(10));
} catch (Exception ex) {
assertEquals("boo", ex.getMessage());
}
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentFlowable() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
StepVerifier.create(client.send(request).flatMap(HttpResponse::getBodyAsByteArray))
.verifyError(IOException.class);
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
}
/**
 * Verifies that 100 synchronous requests can run concurrently on a single shared
 * client, each reading and validating the full long response body.
 *
 * <p>Fix: the method previously carried a duplicated {@code @Test} annotation;
 * {@code @Test} is not a repeatable annotation, so the duplicate does not compile.
 */
@Test
public void testConcurrentRequestsSync() throws InterruptedException {
    int numRequests = 100;
    HttpClient client = new JdkHttpClientProvider().createInstance();
    ForkJoinPool pool = new ForkJoinPool();
    List<Callable<Void>> requests = new ArrayList<>(numRequests);
    for (int i = 0; i < numRequests; i++) {
        requests.add(() -> {
            // Each task performs a full request/response cycle and closes the response.
            try (HttpResponse response = doRequestSync(client, "/long")) {
                byte[] body = response.getBodyAsBinaryData().toBytes();
                TestUtils.assertArraysEqual(LONG_BODY, body);
                return null;
            }
        });
    }
    pool.invokeAll(requests);
    pool.shutdown();
    // All tasks must complete within 60 seconds or the test fails.
    assertTrue(pool.awaitTermination(60, TimeUnit.SECONDS));
}
@Test
public void testIOExceptionInWriteBodyTo() {
    // A channel that fails mid-write must surface its IOException to the caller
    // of writeBodyTo rather than swallowing it.
    HttpClient httpClient = new JdkHttpClientProvider().createInstance();
    assertThrows(IOException.class, () -> {
        try (HttpResponse failingResponse = doRequestSync(httpClient, "/long")) {
            failingResponse.writeBodyTo(new ThrowingWritableByteChannel());
        }
    });
}
private static Mono<HttpResponse> getResponse(String path) {
HttpClient client = new JdkHttpClientBuilder().build();
return doRequest(client, path);
}
// Resolves a relative path against the local test server's HTTP endpoint.
private static URL url(String path) {
    String absolute = SERVER_HTTP_URI + path;
    try {
        return UrlBuilder.parse(absolute).toUrl();
    } catch (MalformedURLException e) {
        // The server URI is well-formed, so this is effectively unreachable.
        throw new RuntimeException(e);
    }
}
// Builds a ~1.1 MB payload by repeating an 11-byte ASCII pattern 100,000 times.
private static byte[] createLongBody() {
    byte[] pattern = "abcdefghijk".getBytes(StandardCharsets.UTF_8);
    ByteArrayOutputStream buffer = new ByteArrayOutputStream(pattern.length * 100000);
    for (int i = 0; i < 100000; i++) {
        buffer.write(pattern, 0, pattern.length);
    }
    return buffer.toByteArray();
}
private static void checkBodyReceived(byte[] expectedBody, String path) {
HttpClient client = new JdkHttpClientBuilder().build();
StepVerifier.create(doRequest(client, path).flatMap(HttpResponse::getBodyAsByteArray))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedBody, bytes))
.verifyComplete();
}
/**
 * Sends a synchronous GET to {@code path} and asserts that the body streamed
 * through {@code writeBodyTo} matches {@code expectedBody} byte-for-byte.
 */
private static void checkBodyReceivedSync(byte[] expectedBody, String path) throws IOException {
    HttpClient client = new JdkHttpClientBuilder().build();
    try (HttpResponse response = doRequestSync(client, path)) {
        // Capture the streamed body in memory via a channel-backed sink.
        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        WritableByteChannel body = Channels.newChannel(outStream);
        response.writeBodyTo(body);
        TestUtils.assertArraysEqual(expectedBody, outStream.toByteArray());
    }
}
private static Mono<HttpResponse> doRequest(HttpClient client, String path) {
return client.send(new HttpRequest(HttpMethod.GET, url(path)));
}
private static HttpResponse doRequestSync(HttpClient client, String path) {
return client.sendSync(new HttpRequest(HttpMethod.GET, url(path)), Context.NONE);
}
/**
 * Writes {@code body} to a fresh temporary file and returns its path.
 *
 * <p>Fix: the previous implementation used a bare {@code FileOutputStream} that
 * was only closed on the success path, leaking the handle if the write threw.
 * {@link Files#write} opens, writes, and closes the file in one call.
 *
 * @param body bytes to persist
 * @return path of the temporary file (registered for deletion on JVM exit)
 * @throws IOException if the file cannot be created or written
 */
private static Path writeToTempFile(byte[] body) throws IOException {
    Path tempFile = Files.createTempFile("data", null);
    tempFile.toFile().deleteOnExit();
    Files.write(tempFile, body);
    return tempFile;
}
/**
 * A {@link WritableByteChannel} that accepts the first three writes (consuming
 * all offered bytes) and throws {@link IOException} on every write after that.
 * Used to exercise mid-stream error propagation in {@code writeBodyTo}.
 */
private static final class ThrowingWritableByteChannel implements WritableByteChannel {
    // Flipped to false by close().
    private boolean open = true;
    // Number of write calls seen so far; the fourth and later calls throw.
    int writeCount = 0;

    @Override
    public int write(ByteBuffer src) throws IOException {
        if (writeCount++ >= 3) {
            throw new IOException();
        }
        // Pretend the bytes were consumed by advancing the position to the limit.
        int consumed = src.remaining();
        src.position(src.limit());
        return consumed;
    }

    @Override
    public boolean isOpen() {
        return open;
    }

    @Override
    public void close() throws IOException {
        open = false;
    }
}
} | class JdkHttpClientTests {
private static final StepVerifierOptions EMPTY_INITIAL_REQUEST_OPTIONS
= StepVerifierOptions.create().initialRequest(0);
private static final String SERVER_HTTP_URI = JdkHttpClientLocalTestServer.getServer().getHttpUri();
@Test
public void testFlowableResponseShortBodyAsByteArrayAsync() {
checkBodyReceived(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseShortBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseLongBodyAsByteArrayAsync() {
checkBodyReceived(LONG_BODY, "/long");
}
@Test
public void testResponseLongBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(LONG_BODY, "/long");
}
@Test
public void testBufferResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/long").buffer()) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testBufferedResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/long"));
try (HttpResponse response = client.sendSync(request, new Context("azure-eagerly-read-response", true))) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testMultipleSubscriptionsEmitsError() {
Mono<byte[]> response = getResponse("/short").cache().flatMap(HttpResponse::getBodyAsByteArray);
StepVerifier.create(response)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(response)
.expectNextCount(0)
.expectError(IllegalStateException.class)
.verify(Duration.ofSeconds(20));
}
@Test
public void testMultipleGetBodyBytesSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
Mono<byte[]> responseBody = response.getBodyAsByteArray();
StepVerifier.create(responseBody)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(responseBody)
.expectNextCount(0)
.expectError(IOException.class)
.verify(Duration.ofSeconds(20));
}
}
@Test
@Timeout(20)
public void testMultipleGetBinaryDataSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500Returned() {
StepVerifier.create(getResponse("/error").flatMap(response -> {
assertEquals(500, response.getStatusCode());
return response.getBodyAsString();
})).expectNext("error").expectComplete().verify(Duration.ofSeconds(20));
}
@Test
@Timeout(20)
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500ReturnedSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/error")) {
assertEquals(500, response.getStatusCode());
assertEquals("error", response.getBodyAsString().block());
}
}
@Test
public void testFlowableBackpressure() {
StepVerifier.create(getResponse("/long").flatMapMany(HttpResponse::getBody), EMPTY_INITIAL_REQUEST_OPTIONS)
.expectNextCount(0)
.thenRequest(1)
.expectNextCount(1)
.thenRequest(3)
.expectNextCount(3)
.thenRequest(Long.MAX_VALUE)
.thenConsumeWhile(ByteBuffer::hasRemaining)
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify();
}
@Test
public void testProgressReporterAsync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
StepVerifier.create(client.send(request, contexts.getContext())).expectNextCount(1).expectComplete().verify();
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
@Test
public void testProgressReporterSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
try (HttpResponse response = client.sendSync(request, contexts.getContext())) {
assertEquals(200, response.getStatusCode());
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
}
@Test
public void testFileUploadSync() throws IOException {
Path tempFile = writeToTempFile(LONG_BODY);
tempFile.toFile().deleteOnExit();
BinaryData body = BinaryData.fromFile(tempFile, 1L, 42L);
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation")).setBody(body);
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
public void testStreamUploadAsync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
InputStream requestBody = new ByteArrayInputStream(LONG_BODY, 1, 42);
BinaryData body = BinaryData.fromStream(requestBody, 42L);
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, "42")
.setBody(body);
StepVerifier.create(client.send(request))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
try {
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify(Duration.ofSeconds(10));
} catch (Exception ex) {
assertEquals("boo", ex.getMessage());
}
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentFlowable() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
StepVerifier.create(client.send(request).flatMap(HttpResponse::getBodyAsByteArray))
.verifyError(IOException.class);
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
}
/**
 * Verifies that 100 synchronous requests can run concurrently on a single shared
 * client, each reading and validating the full long response body.
 *
 * <p>Fix: the method previously carried a duplicated {@code @Test} annotation;
 * {@code @Test} is not a repeatable annotation, so the duplicate does not compile.
 */
@Test
public void testConcurrentRequestsSync() throws InterruptedException {
    int numRequests = 100;
    HttpClient client = new JdkHttpClientProvider().createInstance();
    ForkJoinPool pool = new ForkJoinPool();
    List<Callable<Void>> requests = new ArrayList<>(numRequests);
    for (int i = 0; i < numRequests; i++) {
        requests.add(() -> {
            // Each task performs a full request/response cycle and closes the response.
            try (HttpResponse response = doRequestSync(client, "/long")) {
                byte[] body = response.getBodyAsBinaryData().toBytes();
                TestUtils.assertArraysEqual(LONG_BODY, body);
                return null;
            }
        });
    }
    pool.invokeAll(requests);
    pool.shutdown();
    // All tasks must complete within 60 seconds or the test fails.
    assertTrue(pool.awaitTermination(60, TimeUnit.SECONDS));
}
@Test
public void testIOExceptionInWriteBodyTo() {
HttpClient client = new JdkHttpClientProvider().createInstance();
assertThrows(IOException.class, () -> {
try (HttpResponse response = doRequestSync(client, "/long")) {
response.writeBodyTo(new ThrowingWritableByteChannel());
}
});
}
private static Mono<HttpResponse> getResponse(String path) {
HttpClient client = new JdkHttpClientBuilder().build();
return doRequest(client, path);
}
private static URL url(String path) {
try {
return UrlBuilder.parse(SERVER_HTTP_URI + path).toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static byte[] createLongBody() {
byte[] duplicateBytes = "abcdefghijk".getBytes(StandardCharsets.UTF_8);
byte[] longBody = new byte[duplicateBytes.length * 100000];
for (int i = 0; i < 100000; i++) {
System.arraycopy(duplicateBytes, 0, longBody, i * duplicateBytes.length, duplicateBytes.length);
}
return longBody;
}
private static void checkBodyReceived(byte[] expectedBody, String path) {
HttpClient client = new JdkHttpClientBuilder().build();
StepVerifier.create(doRequest(client, path).flatMap(HttpResponse::getBodyAsByteArray))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedBody, bytes))
.verifyComplete();
}
private static void checkBodyReceivedSync(byte[] expectedBody, String path) throws IOException {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, path)) {
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
WritableByteChannel body = Channels.newChannel(outStream);
response.writeBodyTo(body);
TestUtils.assertArraysEqual(expectedBody, outStream.toByteArray());
}
}
private static Mono<HttpResponse> doRequest(HttpClient client, String path) {
return client.send(new HttpRequest(HttpMethod.GET, url(path)));
}
private static HttpResponse doRequestSync(HttpClient client, String path) {
return client.sendSync(new HttpRequest(HttpMethod.GET, url(path)), Context.NONE);
}
/**
 * Writes {@code body} to a fresh temporary file and returns its path.
 *
 * <p>Fix: the previous implementation used a bare {@code FileOutputStream} that
 * was only closed on the success path, leaking the handle if the write threw.
 * {@link Files#write} opens, writes, and closes the file in one call.
 *
 * @param body bytes to persist
 * @return path of the temporary file (registered for deletion on JVM exit)
 * @throws IOException if the file cannot be created or written
 */
private static Path writeToTempFile(byte[] body) throws IOException {
    Path tempFile = Files.createTempFile("data", null);
    tempFile.toFile().deleteOnExit();
    Files.write(tempFile, body);
    return tempFile;
}
/**
 * A {@link WritableByteChannel} that accepts the first three writes (consuming
 * all offered bytes) and throws {@link IOException} on every write after that.
 * Used to exercise mid-stream error propagation in {@code writeBodyTo}.
 */
private static final class ThrowingWritableByteChannel implements WritableByteChannel {
    // Flipped to false by close().
    private boolean open = true;
    // Number of write calls seen so far; the fourth and later calls throw.
    int writeCount = 0;
    @Override
    public int write(ByteBuffer src) throws IOException {
        if (writeCount++ < 3) {
            // Pretend the bytes were consumed by advancing the position.
            int remaining = src.remaining();
            src.position(src.position() + remaining);
            return remaining;
        } else {
            throw new IOException();
        }
    }
    @Override
    public boolean isOpen() {
        return open;
    }
    @Override
    public void close() throws IOException {
        open = false;
    }
}
} |
There aren't, or they're using types that `HttpResponse` is converted into which doesn't run into this issue. | public void testConcurrentRequests() {
int numRequests = 100;
HttpClient client = new JdkHttpClientProvider().createInstance();
ParallelFlux<byte[]> responses = Flux.range(1, numRequests)
.parallel()
.runOn(Schedulers.boundedElastic())
.flatMap(ignored -> doRequest(client, "/long"))
.flatMap(response -> Mono.using(() -> response, HttpResponse::getBodyAsByteArray, HttpResponse::close));
StepVerifier.create(responses).thenConsumeWhile(response -> {
TestUtils.assertArraysEqual(LONG_BODY, response);
return true;
}).expectComplete().verify(Duration.ofSeconds(60));
} | .flatMap(response -> Mono.using(() -> response, HttpResponse::getBodyAsByteArray, HttpResponse::close)); | public void testConcurrentRequests() {
int numRequests = 100;
HttpClient client = new JdkHttpClientProvider().createInstance();
ParallelFlux<byte[]> responses = Flux.range(1, numRequests)
.parallel()
.runOn(Schedulers.boundedElastic())
.flatMap(ignored -> doRequest(client, "/long"))
.flatMap(response -> Mono.using(() -> response, HttpResponse::getBodyAsByteArray, HttpResponse::close));
StepVerifier.create(responses).thenConsumeWhile(response -> {
TestUtils.assertArraysEqual(LONG_BODY, response);
return true;
}).expectComplete().verify(Duration.ofSeconds(60));
} | class JdkHttpClientTests {
private static final StepVerifierOptions EMPTY_INITIAL_REQUEST_OPTIONS
= StepVerifierOptions.create().initialRequest(0);
private static final String SERVER_HTTP_URI = JdkHttpClientLocalTestServer.getServer().getHttpUri();
@Test
public void testFlowableResponseShortBodyAsByteArrayAsync() {
checkBodyReceived(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseShortBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseLongBodyAsByteArrayAsync() {
checkBodyReceived(LONG_BODY, "/long");
}
@Test
public void testResponseLongBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(LONG_BODY, "/long");
}
@Test
public void testBufferResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/long").buffer()) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testBufferedResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/long"));
try (HttpResponse response = client.sendSync(request, new Context("azure-eagerly-read-response", true))) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testMultipleSubscriptionsEmitsError() {
Mono<byte[]> response = getResponse("/short").cache().flatMap(HttpResponse::getBodyAsByteArray);
StepVerifier.create(response)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(response)
.expectNextCount(0)
.expectError(IllegalStateException.class)
.verify(Duration.ofSeconds(20));
}
@Test
public void testMultipleGetBodyBytesSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
Mono<byte[]> responseBody = response.getBodyAsByteArray();
StepVerifier.create(responseBody)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(responseBody)
.expectNextCount(0)
.expectError(IOException.class)
.verify(Duration.ofSeconds(20));
}
}
@Test
@Timeout(20)
public void testMultipleGetBinaryDataSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500Returned() {
StepVerifier.create(getResponse("/error").flatMap(response -> {
assertEquals(500, response.getStatusCode());
return response.getBodyAsString();
})).expectNext("error").expectComplete().verify(Duration.ofSeconds(20));
}
@Test
@Timeout(20)
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500ReturnedSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/error")) {
assertEquals(500, response.getStatusCode());
assertEquals("error", response.getBodyAsString().block());
}
}
@Test
public void testFlowableBackpressure() {
StepVerifier.create(getResponse("/long").flatMapMany(HttpResponse::getBody), EMPTY_INITIAL_REQUEST_OPTIONS)
.expectNextCount(0)
.thenRequest(1)
.expectNextCount(1)
.thenRequest(3)
.expectNextCount(3)
.thenRequest(Long.MAX_VALUE)
.thenConsumeWhile(ByteBuffer::hasRemaining)
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify();
}
@Test
public void testProgressReporterAsync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
StepVerifier.create(client.send(request, contexts.getContext())).expectNextCount(1).expectComplete().verify();
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
@Test
public void testProgressReporterSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
try (HttpResponse response = client.sendSync(request, contexts.getContext())) {
assertEquals(200, response.getStatusCode());
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
}
@Test
public void testFileUploadSync() throws IOException {
Path tempFile = writeToTempFile(LONG_BODY);
tempFile.toFile().deleteOnExit();
BinaryData body = BinaryData.fromFile(tempFile, 1L, 42L);
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation")).setBody(body);
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
public void testStreamUploadAsync() {
    // Stream a 42-byte window of LONG_BODY as the request payload; the server
    // endpoint validates the received bytes.
    HttpClient httpClient = new JdkHttpClientProvider().createInstance();
    BinaryData payload = BinaryData.fromStream(new ByteArrayInputStream(LONG_BODY, 1, 42), 42L);
    HttpRequest post = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation"))
        .setHeader(HttpHeaderName.CONTENT_LENGTH, "42")
        .setBody(payload);
    StepVerifier.create(httpClient.send(post))
        .assertNext(response -> assertEquals(200, response.getStatusCode()))
        .verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponseSync() {
    // A request body that immediately errors must surface from sendSync as an
    // UncheckedIOException whose cause carries the original message.
    HttpClient httpClient = new JdkHttpClientProvider().createInstance();
    HttpRequest post = new HttpRequest(HttpMethod.POST, url("/shortPost"))
        .setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
        .setBody(Flux.error(new RuntimeException("boo")));
    UncheckedIOException failure
        = assertThrows(UncheckedIOException.class, () -> httpClient.sendSync(post, Context.NONE));
    assertEquals("boo", failure.getCause().getMessage());
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
try {
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify(Duration.ofSeconds(10));
} catch (Exception ex) {
assertEquals("boo", ex.getMessage());
}
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentFlowable() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
StepVerifier.create(client.send(request).flatMap(HttpResponse::getBodyAsByteArray))
.verifyError(IOException.class);
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
}
// NOTE(review): a duplicated @Test annotation was removed here. @Test is not a
// repeatable annotation, so the duplicate is a compile error; it likely marks a
// test method that was lost in an earlier edit — confirm against history.
@Test
public void testConcurrentRequestsSync() throws InterruptedException {
    // Fire 100 synchronous GETs in parallel and verify each receives the full body.
    int numRequests = 100;
    HttpClient client = new JdkHttpClientProvider().createInstance();
    ForkJoinPool pool = new ForkJoinPool();
    List<Callable<Void>> requests = new ArrayList<>(numRequests);
    for (int i = 0; i < numRequests; i++) {
        requests.add(() -> {
            try (HttpResponse response = doRequestSync(client, "/long")) {
                byte[] body = response.getBodyAsBinaryData().toBytes();
                TestUtils.assertArraysEqual(LONG_BODY, body);
                return null;
            }
        });
    }
    pool.invokeAll(requests);
    pool.shutdown();
    // Fail rather than hang if the pool does not drain within a minute.
    assertTrue(pool.awaitTermination(60, TimeUnit.SECONDS));
}
@Test
// writeBodyTo must propagate an IOException thrown by the destination channel
// rather than swallowing it.
public void testIOExceptionInWriteBodyTo() {
HttpClient client = new JdkHttpClientProvider().createInstance();
assertThrows(IOException.class, () -> {
try (HttpResponse response = doRequestSync(client, "/long")) {
// ThrowingWritableByteChannel accepts the first few writes, then throws.
response.writeBodyTo(new ThrowingWritableByteChannel());
}
});
}
// Issues an async GET against the local test server for the given path.
private static Mono<HttpResponse> getResponse(String path) {
HttpClient client = new JdkHttpClientBuilder().build();
return doRequest(client, path);
}
// Builds an absolute URL for a path on the local test server. Parse failures
// are rethrown unchecked, since test paths are expected to be well-formed.
private static URL url(String path) {
try {
return UrlBuilder.parse(SERVER_HTTP_URI + path).toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
// Builds the 1,100,000-byte test payload: "abcdefghijk" repeated 100,000 times.
private static byte[] createLongBody() {
    byte[] pattern = "abcdefghijk".getBytes(StandardCharsets.UTF_8);
    int repetitions = 100000;
    byte[] body = new byte[pattern.length * repetitions];
    for (int offset = 0; offset < body.length; offset += pattern.length) {
        System.arraycopy(pattern, 0, body, offset, pattern.length);
    }
    return body;
}
// Asserts that an async GET to the given path yields exactly expectedBody.
private static void checkBodyReceived(byte[] expectedBody, String path) {
HttpClient client = new JdkHttpClientBuilder().build();
StepVerifier.create(doRequest(client, path).flatMap(HttpResponse::getBodyAsByteArray))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedBody, bytes))
.verifyComplete();
}
// Asserts that a sync GET to the given path writes exactly expectedBody through
// HttpResponse.writeBodyTo (exercising the channel-based body path).
private static void checkBodyReceivedSync(byte[] expectedBody, String path) throws IOException {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, path)) {
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
WritableByteChannel body = Channels.newChannel(outStream);
response.writeBodyTo(body);
TestUtils.assertArraysEqual(expectedBody, outStream.toByteArray());
}
}
// Sends an async GET for the given server path using the supplied client.
private static Mono<HttpResponse> doRequest(HttpClient client, String path) {
return client.send(new HttpRequest(HttpMethod.GET, url(path)));
}
// Sends a blocking GET for the given server path; the caller owns (and must
// close) the returned response.
private static HttpResponse doRequestSync(HttpClient client, String path) {
return client.sendSync(new HttpRequest(HttpMethod.GET, url(path)), Context.NONE);
}
// Writes body to a fresh temp file and returns its path. The file is
// registered for deletion on JVM exit.
private static Path writeToTempFile(byte[] body) throws IOException {
    Path tempFile = Files.createTempFile("data", null);
    tempFile.toFile().deleteOnExit();
    // Files.write opens, writes, and closes the stream itself — fixing the
    // original FileOutputStream leak when write(..) threw before close().
    Files.write(tempFile, body);
    return tempFile;
}
// WritableByteChannel test double: accepts the first three writes in full,
// then throws IOException on every subsequent write.
private static final class ThrowingWritableByteChannel implements WritableByteChannel {
    private boolean open = true;
    int writeCount = 0;

    @Override
    public int write(ByteBuffer src) throws IOException {
        if (writeCount++ >= 3) {
            throw new IOException();
        }
        // Consume the entire buffer, as a real channel would on success.
        int consumed = src.remaining();
        src.position(src.limit());
        return consumed;
    }

    @Override
    public boolean isOpen() {
        return open;
    }

    @Override
    public void close() throws IOException {
        open = false;
    }
}
} | class JdkHttpClientTests {
private static final StepVerifierOptions EMPTY_INITIAL_REQUEST_OPTIONS
= StepVerifierOptions.create().initialRequest(0);
private static final String SERVER_HTTP_URI = JdkHttpClientLocalTestServer.getServer().getHttpUri();
@Test
public void testFlowableResponseShortBodyAsByteArrayAsync() {
checkBodyReceived(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseShortBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(SHORT_BODY, "/short");
}
@Test
public void testFlowableResponseLongBodyAsByteArrayAsync() {
checkBodyReceived(LONG_BODY, "/long");
}
@Test
public void testResponseLongBodyAsByteArraySync() throws IOException {
checkBodyReceivedSync(LONG_BODY, "/long");
}
@Test
public void testBufferResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/long").buffer()) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testBufferedResponseSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/long"));
try (HttpResponse response = client.sendSync(request, new Context("azure-eagerly-read-response", true))) {
TestUtils.assertArraysEqual(LONG_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testMultipleSubscriptionsEmitsError() {
Mono<byte[]> response = getResponse("/short").cache().flatMap(HttpResponse::getBodyAsByteArray);
StepVerifier.create(response)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(response)
.expectNextCount(0)
.expectError(IllegalStateException.class)
.verify(Duration.ofSeconds(20));
}
@Test
public void testMultipleGetBodyBytesSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
Mono<byte[]> responseBody = response.getBodyAsByteArray();
StepVerifier.create(responseBody)
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify(Duration.ofSeconds(20));
StepVerifier.create(responseBody)
.expectNextCount(0)
.expectError(IOException.class)
.verify(Duration.ofSeconds(20));
}
}
@Test
@Timeout(20)
public void testMultipleGetBinaryDataSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/short")) {
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
TestUtils.assertArraysEqual(SHORT_BODY, response.getBodyAsBinaryData().toBytes());
}
}
@Test
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500Returned() {
StepVerifier.create(getResponse("/error").flatMap(response -> {
assertEquals(500, response.getStatusCode());
return response.getBodyAsString();
})).expectNext("error").expectComplete().verify(Duration.ofSeconds(20));
}
@Test
@Timeout(20)
public void testFlowableWhenServerReturnsBodyAndNoErrorsWhenHttp500ReturnedSync() {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, "/error")) {
assertEquals(500, response.getStatusCode());
assertEquals("error", response.getBodyAsString().block());
}
}
@Test
public void testFlowableBackpressure() {
StepVerifier.create(getResponse("/long").flatMapMany(HttpResponse::getBody), EMPTY_INITIAL_REQUEST_OPTIONS)
.expectNextCount(0)
.thenRequest(1)
.expectNextCount(1)
.thenRequest(3)
.expectNextCount(3)
.thenRequest(Long.MAX_VALUE)
.thenConsumeWhile(ByteBuffer::hasRemaining)
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify();
}
@Test
public void testProgressReporterAsync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
StepVerifier.create(client.send(request, contexts.getContext())).expectNextCount(1).expectComplete().verify();
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
@Test
public void testProgressReporterSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
ConcurrentLinkedDeque<Long> progress = new ConcurrentLinkedDeque<>();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(SHORT_BODY.length + LONG_BODY.length))
.setBody(Flux.just(ByteBuffer.wrap(LONG_BODY)).concatWith(Flux.just(ByteBuffer.wrap(SHORT_BODY))));
Contexts contexts = Contexts.with(Context.NONE)
.setHttpRequestProgressReporter(ProgressReporter.withProgressListener(progress::add));
try (HttpResponse response = client.sendSync(request, contexts.getContext())) {
assertEquals(200, response.getStatusCode());
List<Long> progressList = progress.stream().collect(Collectors.toList());
assertEquals(LONG_BODY.length, progressList.get(0));
assertEquals(SHORT_BODY.length + LONG_BODY.length, progressList.get(1));
}
}
@Test
public void testFileUploadSync() throws IOException {
Path tempFile = writeToTempFile(LONG_BODY);
tempFile.toFile().deleteOnExit();
BinaryData body = BinaryData.fromFile(tempFile, 1L, 42L);
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation")).setBody(body);
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
public void testStreamUploadAsync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
InputStream requestBody = new ByteArrayInputStream(LONG_BODY, 1, 42);
BinaryData body = BinaryData.fromStream(requestBody, 42L);
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPostWithBodyValidation"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, "42")
.setBody(body);
StepVerifier.create(client.send(request))
.assertNext(r -> assertEquals(200, r.getStatusCode()))
.verifyComplete();
}
@Test
public void testRequestBodyIsErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
HttpRequest request
= new HttpRequest(HttpMethod.POST, url("/shortPost")).setHeader(HttpHeaderName.CONTENT_LENGTH, "132")
.setBody(Flux.error(new RuntimeException("boo")));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponse() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
try {
StepVerifier.create(client.send(request)).expectErrorMessage("boo").verify(Duration.ofSeconds(10));
} catch (Exception ex) {
assertEquals("boo", ex.getMessage());
}
}
@Test
public void testRequestBodyEndsInErrorShouldPropagateToResponseSync() {
HttpClient client = new JdkHttpClientProvider().createInstance();
String contentChunk = "abcdefgh";
int repetitions = 1000;
HttpRequest request = new HttpRequest(HttpMethod.POST, url("/shortPost"))
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(contentChunk.length() * (repetitions + 1)))
.setBody(Flux.just(contentChunk)
.repeat(repetitions)
.map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
.concatWith(Flux.error(new RuntimeException("boo"))));
UncheckedIOException thrown
= assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
assertEquals("boo", thrown.getCause().getMessage());
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentFlowable() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
StepVerifier.create(client.send(request).flatMap(HttpResponse::getBodyAsByteArray))
.verifyError(IOException.class);
}
@Test
public void testServerShutsDownSocketShouldPushErrorToContentSync() {
HttpClient client = new JdkHttpClientBuilder().build();
HttpRequest request = new HttpRequest(HttpMethod.GET, url("/connectionClose"));
assertThrows(UncheckedIOException.class, () -> client.sendSync(request, Context.NONE));
}
// NOTE(review): a duplicated @Test annotation was removed here. @Test is not a
// repeatable annotation, so the duplicate is a compile error; it likely marks a
// test method that was lost in an earlier edit — confirm against history.
@Test
public void testConcurrentRequestsSync() throws InterruptedException {
    // Fire 100 synchronous GETs in parallel and verify each receives the full body.
    int numRequests = 100;
    HttpClient client = new JdkHttpClientProvider().createInstance();
    ForkJoinPool pool = new ForkJoinPool();
    List<Callable<Void>> requests = new ArrayList<>(numRequests);
    for (int i = 0; i < numRequests; i++) {
        requests.add(() -> {
            try (HttpResponse response = doRequestSync(client, "/long")) {
                byte[] body = response.getBodyAsBinaryData().toBytes();
                TestUtils.assertArraysEqual(LONG_BODY, body);
                return null;
            }
        });
    }
    pool.invokeAll(requests);
    pool.shutdown();
    // Fail rather than hang if the pool does not drain within a minute.
    assertTrue(pool.awaitTermination(60, TimeUnit.SECONDS));
}
@Test
public void testIOExceptionInWriteBodyTo() {
HttpClient client = new JdkHttpClientProvider().createInstance();
assertThrows(IOException.class, () -> {
try (HttpResponse response = doRequestSync(client, "/long")) {
response.writeBodyTo(new ThrowingWritableByteChannel());
}
});
}
private static Mono<HttpResponse> getResponse(String path) {
HttpClient client = new JdkHttpClientBuilder().build();
return doRequest(client, path);
}
private static URL url(String path) {
try {
return UrlBuilder.parse(SERVER_HTTP_URI + path).toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static byte[] createLongBody() {
byte[] duplicateBytes = "abcdefghijk".getBytes(StandardCharsets.UTF_8);
byte[] longBody = new byte[duplicateBytes.length * 100000];
for (int i = 0; i < 100000; i++) {
System.arraycopy(duplicateBytes, 0, longBody, i * duplicateBytes.length, duplicateBytes.length);
}
return longBody;
}
private static void checkBodyReceived(byte[] expectedBody, String path) {
HttpClient client = new JdkHttpClientBuilder().build();
StepVerifier.create(doRequest(client, path).flatMap(HttpResponse::getBodyAsByteArray))
.assertNext(bytes -> TestUtils.assertArraysEqual(expectedBody, bytes))
.verifyComplete();
}
private static void checkBodyReceivedSync(byte[] expectedBody, String path) throws IOException {
HttpClient client = new JdkHttpClientBuilder().build();
try (HttpResponse response = doRequestSync(client, path)) {
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
WritableByteChannel body = Channels.newChannel(outStream);
response.writeBodyTo(body);
TestUtils.assertArraysEqual(expectedBody, outStream.toByteArray());
}
}
private static Mono<HttpResponse> doRequest(HttpClient client, String path) {
return client.send(new HttpRequest(HttpMethod.GET, url(path)));
}
private static HttpResponse doRequestSync(HttpClient client, String path) {
return client.sendSync(new HttpRequest(HttpMethod.GET, url(path)), Context.NONE);
}
// Writes body to a fresh temp file and returns its path. The file is
// registered for deletion on JVM exit.
private static Path writeToTempFile(byte[] body) throws IOException {
    Path tempFile = Files.createTempFile("data", null);
    tempFile.toFile().deleteOnExit();
    // Files.write opens, writes, and closes the stream itself — fixing the
    // original FileOutputStream leak when write(..) threw before close().
    Files.write(tempFile, body);
    return tempFile;
}
private static final class ThrowingWritableByteChannel implements WritableByteChannel {
private boolean open = true;
int writeCount = 0;
@Override
public int write(ByteBuffer src) throws IOException {
if (writeCount++ < 3) {
int remaining = src.remaining();
src.position(src.position() + remaining);
return remaining;
} else {
throw new IOException();
}
}
@Override
public boolean isOpen() {
return open;
}
@Override
public void close() throws IOException {
open = false;
}
}
} |
In GA, we may need to improve this API to use an options bag. I assume `content` is the payload, most of the other arguments are query parameters, and traceId is a header parameter. | public void translateWithNoTranslateTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("en");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<span class=notranslate>今天是怎么回事是</span>非常可怕的"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("今天是怎么回事是"));
} | List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null); | public void translateWithNoTranslateTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("en");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<span class=notranslate>今天是怎么回事是</span>非常可怕的"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("今天是怎么回事是"));
} | class TranslateTests extends TextTranslationClientBase {
@Test
public void translateBasic() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("Hola mundo"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithAutoDetect() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
// NOTE(review): a duplicated @Test annotation was removed here. @Test is not a
// repeatable annotation, so the duplicate is a compile error; it likely marks a
// test method that was lost in an earlier edit — confirm against history.
@Test
public void translateWithDictionaryTag() {
    // The mstrans:dictionary markup pins the translation of "wordomatic", so the
    // translated text must contain that token verbatim.
    ArrayList<String> targetLanguages = new ArrayList<>();
    targetLanguages.add("es");
    ArrayList<InputTextItem> content = new ArrayList<>();
    content.add(new InputTextItem("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry."));
    List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "en", null, null, null, null, null, null, null, null, null, null);
    assertEquals(1, response.get(0).getTranslations().size());
    assertEquals("es", response.get(0).getTranslations().get(0).getTo());
    assertTrue(response.get(0).getTranslations().get(0).getText().contains("wordomatic"));
}
@Test
public void translateWithTransliteration() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hans");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("hudha akhtabar."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "ar", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getSourceText().getText());
assertEquals("zh-Hans", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateFromLatinToLatinScript() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("ta");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("ap kaise ho"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "hi", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getTranslations().get(0).getTransliteration().getScript());
assertEquals("eppadi irukkiraai?", response.get(0).getTranslations().get(0).getTransliteration().getText());
}
@Test
public void translateWithMultipleInputTexts() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
content.add(new InputTextItem("Esto es una prueba."));
content.add(new InputTextItem("Dies ist ein Test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(3, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals("es", response.get(1).getDetectedLanguage().getLanguage());
assertEquals("de", response.get(2).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(1).getDetectedLanguage().getScore());
assertEquals(1, response.get(2).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(1).getTranslations().get(0).getText());
assertNotNull(response.get(2).getTranslations().get(0).getText());
}
@Test
public void translateMultipleTargetLanguages() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
targetLanguages.add("es");
targetLanguages.add("de");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals(3, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(0).getTranslations().get(1).getText());
assertNotNull(response.get(0).getTranslations().get(2).getText());
}
@Test
public void translateDifferentTextTypes() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<html><body>This <b>is</b> a test.</body></html>"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
}
@Test
public void translateWithProfanity() {
ProfanityAction profanityAction = ProfanityAction.MARKED;
ProfanityMarker profanityMarker = ProfanityMarker.ASTERISK;
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hant");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("shit this is fucking crazy"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, profanityAction, profanityMarker, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("***"));
}
@Test
public void translateWithAlignment() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, true, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getAlignment().getProj());
}
@Test
public void translateWithIncludeSentenceLength() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("fr");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. Il suffit de copier et coller un extrait de code n'importe où."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, null, true, null, null, null, null);
assertEquals(1, response.size());
assertEquals("fr", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getSrcSentLen().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getTransSentLen().size());
}
@Test
public void translateWithCustomEndpoint() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClientWithCustomEndpoint().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithToken() throws Exception {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClientWithToken().translate(targetLanguages, content);
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
} | class TranslateTests extends TextTranslationClientBase {
@Test
public void translateBasic() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("Hola mundo"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithAutoDetect() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
// NOTE(review): a duplicated @Test annotation was removed here. @Test is not a
// repeatable annotation, so the duplicate is a compile error; it likely marks a
// test method that was lost in an earlier edit — confirm against history.
@Test
public void translateWithDictionaryTag() {
    // The mstrans:dictionary markup pins the translation of "wordomatic", so the
    // translated text must contain that token verbatim.
    ArrayList<String> targetLanguages = new ArrayList<>();
    targetLanguages.add("es");
    ArrayList<InputTextItem> content = new ArrayList<>();
    content.add(new InputTextItem("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry."));
    List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "en", null, null, null, null, null, null, null, null, null, null);
    assertEquals(1, response.get(0).getTranslations().size());
    assertEquals("es", response.get(0).getTranslations().get(0).getTo());
    assertTrue(response.get(0).getTranslations().get(0).getText().contains("wordomatic"));
}
@Test
public void translateWithTransliteration() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hans");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("hudha akhtabar."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "ar", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getSourceText().getText());
assertEquals("zh-Hans", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateFromLatinToLatinScript() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("ta");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("ap kaise ho"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "hi", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getTranslations().get(0).getTransliteration().getScript());
assertEquals("eppadi irukkiraai?", response.get(0).getTranslations().get(0).getTransliteration().getText());
}
@Test
public void translateWithMultipleInputTexts() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
content.add(new InputTextItem("Esto es una prueba."));
content.add(new InputTextItem("Dies ist ein Test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(3, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals("es", response.get(1).getDetectedLanguage().getLanguage());
assertEquals("de", response.get(2).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(1).getDetectedLanguage().getScore());
assertEquals(1, response.get(2).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(1).getTranslations().get(0).getText());
assertNotNull(response.get(2).getTranslations().get(0).getText());
}
@Test
public void translateMultipleTargetLanguages() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
targetLanguages.add("es");
targetLanguages.add("de");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals(3, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(0).getTranslations().get(1).getText());
assertNotNull(response.get(0).getTranslations().get(2).getText());
}
@Test
public void translateDifferentTextTypes() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<html><body>This <b>is</b> a test.</body></html>"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
}
@Test
public void translateWithProfanity() {
ProfanityAction profanityAction = ProfanityAction.MARKED;
ProfanityMarker profanityMarker = ProfanityMarker.ASTERISK;
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hant");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("shit this is fucking crazy"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, profanityAction, profanityMarker, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("***"));
}
@Test
public void translateWithAlignment() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, true, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getAlignment().getProj());
}
@Test
public void translateWithIncludeSentenceLength() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("fr");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. Il suffit de copier et coller un extrait de code n'importe où."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, null, true, null, null, null, null);
assertEquals(1, response.size());
assertEquals("fr", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getSrcSentLen().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getTransSentLen().size());
}
@Test
public void translateWithCustomEndpoint() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClientWithCustomEndpoint().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithToken() throws Exception {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClientWithToken().translate(targetLanguages, content);
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
} |
I will make a task for myself. Would you be able to find any existing API which is doing the same? Is it custom code in Java or does it require the TypeSpec change? | public void translateWithNoTranslateTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("en");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<span class=notranslate>今天是怎么回事是</span>非常可怕的"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("今天是怎么回事是"));
} | List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null); | public void translateWithNoTranslateTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("en");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<span class=notranslate>今天是怎么回事是</span>非常可怕的"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("今天是怎么回事是"));
} | class TranslateTests extends TextTranslationClientBase {
@Test
public void translateBasic() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("Hola mundo"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithAutoDetect() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
@Test
public void translateWithDictionaryTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("es");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "en", null, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("es", response.get(0).getTranslations().get(0).getTo());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("wordomatic"));
}
@Test
public void translateWithTransliteration() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hans");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("hudha akhtabar."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "ar", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getSourceText().getText());
assertEquals("zh-Hans", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateFromLatinToLatinScript() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("ta");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("ap kaise ho"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "hi", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getTranslations().get(0).getTransliteration().getScript());
assertEquals("eppadi irukkiraai?", response.get(0).getTranslations().get(0).getTransliteration().getText());
}
@Test
public void translateWithMultipleInputTexts() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
content.add(new InputTextItem("Esto es una prueba."));
content.add(new InputTextItem("Dies ist ein Test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(3, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals("es", response.get(1).getDetectedLanguage().getLanguage());
assertEquals("de", response.get(2).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(1).getDetectedLanguage().getScore());
assertEquals(1, response.get(2).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(1).getTranslations().get(0).getText());
assertNotNull(response.get(2).getTranslations().get(0).getText());
}
@Test
public void translateMultipleTargetLanguages() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
targetLanguages.add("es");
targetLanguages.add("de");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals(3, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(0).getTranslations().get(1).getText());
assertNotNull(response.get(0).getTranslations().get(2).getText());
}
@Test
public void translateDifferentTextTypes() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<html><body>This <b>is</b> a test.</body></html>"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
}
@Test
public void translateWithProfanity() {
ProfanityAction profanityAction = ProfanityAction.MARKED;
ProfanityMarker profanityMarker = ProfanityMarker.ASTERISK;
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hant");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("shit this is fucking crazy"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, profanityAction, profanityMarker, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("***"));
}
@Test
public void translateWithAlignment() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, true, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getAlignment().getProj());
}
@Test
public void translateWithIncludeSentenceLength() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("fr");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. Il suffit de copier et coller un extrait de code n'importe où."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, null, true, null, null, null, null);
assertEquals(1, response.size());
assertEquals("fr", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getSrcSentLen().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getTransSentLen().size());
}
@Test
public void translateWithCustomEndpoint() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClientWithCustomEndpoint().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithToken() throws Exception {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClientWithToken().translate(targetLanguages, content);
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
} | class TranslateTests extends TextTranslationClientBase {
@Test
public void translateBasic() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("Hola mundo"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithAutoDetect() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
@Test
public void translateWithDictionaryTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("es");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "en", null, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("es", response.get(0).getTranslations().get(0).getTo());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("wordomatic"));
}
@Test
public void translateWithTransliteration() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hans");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("hudha akhtabar."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "ar", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getSourceText().getText());
assertEquals("zh-Hans", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateFromLatinToLatinScript() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("ta");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("ap kaise ho"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "hi", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getTranslations().get(0).getTransliteration().getScript());
assertEquals("eppadi irukkiraai?", response.get(0).getTranslations().get(0).getTransliteration().getText());
}
@Test
public void translateWithMultipleInputTexts() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
content.add(new InputTextItem("Esto es una prueba."));
content.add(new InputTextItem("Dies ist ein Test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(3, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals("es", response.get(1).getDetectedLanguage().getLanguage());
assertEquals("de", response.get(2).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(1).getDetectedLanguage().getScore());
assertEquals(1, response.get(2).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(1).getTranslations().get(0).getText());
assertNotNull(response.get(2).getTranslations().get(0).getText());
}
@Test
public void translateMultipleTargetLanguages() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
targetLanguages.add("es");
targetLanguages.add("de");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals(3, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(0).getTranslations().get(1).getText());
assertNotNull(response.get(0).getTranslations().get(2).getText());
}
@Test
public void translateDifferentTextTypes() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<html><body>This <b>is</b> a test.</body></html>"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
}
@Test
public void translateWithProfanity() {
ProfanityAction profanityAction = ProfanityAction.MARKED;
ProfanityMarker profanityMarker = ProfanityMarker.ASTERISK;
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hant");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("shit this is fucking crazy"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, profanityAction, profanityMarker, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("***"));
}
@Test
public void translateWithAlignment() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, true, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getAlignment().getProj());
}
@Test
public void translateWithIncludeSentenceLength() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("fr");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. Il suffit de copier et coller un extrait de code n'importe où."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, null, true, null, null, null, null);
assertEquals(1, response.size());
assertEquals("fr", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getSrcSentLen().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getTransSentLen().size());
}
@Test
public void translateWithCustomEndpoint() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClientWithCustomEndpoint().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithToken() throws Exception {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClientWithToken().translate(targetLanguages, content);
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
} |
At present, it had to be done in Java code. If done by partial-update, one just remove the `@Generated` annotation over the method, and modify it in place (in this case, also need to add a e.g. `TranslateOptions` class with ctor/setter/getter. If done by the customization class, it could be a bit more complex. But generally same idea of modifying the method and add a class. There were a long discussion of whether we can do cross-language support for this case in typespec client.tsp. So far, no final decision yet. | public void translateWithNoTranslateTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("en");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<span class=notranslate>今天是怎么回事是</span>非常可怕的"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("今天是怎么回事是"));
} | List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null); | public void translateWithNoTranslateTag() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("en");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<span class=notranslate>今天是怎么回事是</span>非常可怕的"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "zh-Hans", TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.get(0).getTranslations().size());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("今天是怎么回事是"));
} | class TranslateTests extends TextTranslationClientBase {
@Test
public void translateBasic() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("Hola mundo"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithAutoDetect() {
    // No source language supplied: the service must auto-detect English.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("cs");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("This is a test."));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs);
    final TranslatedTextItem first = results.get(0);
    assertEquals("en", first.getDetectedLanguage().getLanguage());
    assertEquals(1, first.getTranslations().size());
    assertEquals("cs", first.getTranslations().get(0).getTo());
    assertNotNull(first.getTranslations().get(0).getText());
}
/**
 * Verifies that a word wrapped in an {@code mstrans:dictionary} tag is rendered with the
 * supplied dictionary translation ("wordomatic") instead of being machine-translated.
 * Fix: removed a duplicated {@code @Test} annotation ({@code @Test} is not repeatable,
 * so declaring it twice does not compile).
 */
@Test
public void translateWithDictionaryTag() {
    ArrayList<String> targetLanguages = new ArrayList<>();
    targetLanguages.add("es");
    ArrayList<InputTextItem> content = new ArrayList<>();
    content.add(new InputTextItem("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry."));
    List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "en", null, null, null, null, null, null, null, null, null, null);
    assertEquals(1, response.get(0).getTranslations().size());
    assertEquals("es", response.get(0).getTranslations().get(0).getTo());
    assertTrue(response.get(0).getTranslations().get(0).getText().contains("wordomatic"));
}
@Test
public void translateWithTransliteration() {
    // Arabic text supplied in Latin script ("Latn") translated into zh-Hans.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("zh-Hans");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("hudha akhtabar."));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs, null, "ar", null, null, null, null, null, null, null, "Latn", "Latn", null);
    assertNotNull(results.get(0).getSourceText().getText());
    assertEquals("zh-Hans", results.get(0).getTranslations().get(0).getTo());
    assertNotNull(results.get(0).getTranslations().get(0).getText());
}
@Test
public void translateFromLatinToLatinScript() {
    // Hindi input in Latin script translated to Tamil, transliteration also in Latin script.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("ta");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("ap kaise ho"));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs, null, "hi", null, null, null, null, null, null, null, "Latn", "Latn", null);
    assertNotNull(results.get(0).getTranslations().get(0).getTransliteration().getScript());
    assertEquals("eppadi irukkiraai?", results.get(0).getTranslations().get(0).getTransliteration().getText());
}
@Test
public void translateWithMultipleInputTexts() {
    // One call carrying three inputs in different languages; each is detected independently.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("cs");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("This is a test."));
    inputs.add(new InputTextItem("Esto es una prueba."));
    inputs.add(new InputTextItem("Dies ist ein Test."));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs);
    assertEquals(3, results.size());
    assertEquals("en", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals("es", results.get(1).getDetectedLanguage().getLanguage());
    assertEquals("de", results.get(2).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
    assertEquals(1, results.get(1).getDetectedLanguage().getScore());
    assertEquals(1, results.get(2).getDetectedLanguage().getScore());
    assertNotNull(results.get(0).getTranslations().get(0).getText());
    assertNotNull(results.get(1).getTranslations().get(0).getText());
    assertNotNull(results.get(2).getTranslations().get(0).getText());
}
@Test
public void translateMultipleTargetLanguages() {
    // Single input fanned out to three target languages in one response item.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("cs");
    toLanguages.add("es");
    toLanguages.add("de");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("This is a test."));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs);
    assertEquals(1, results.size());
    assertEquals(3, results.get(0).getTranslations().size());
    assertEquals("en", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
    assertNotNull(results.get(0).getTranslations().get(0).getText());
    assertNotNull(results.get(0).getTranslations().get(1).getText());
    assertNotNull(results.get(0).getTranslations().get(2).getText());
}
@Test
public void translateDifferentTextTypes() {
    // HTML text type: markup is preserved while the body text is translated.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("cs");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("<html><body>This <b>is</b> a test.</body></html>"));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs, null, null, TextType.HTML, null, null, null, null, null, null, null, null, null);
    assertEquals(1, results.size());
    assertEquals(1, results.get(0).getTranslations().size());
    assertEquals("en", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
}
@Test
public void translateWithProfanity() {
    // MARKED + ASTERISK: profane words are masked with "***" in the translation.
    final ProfanityAction action = ProfanityAction.MARKED;
    final ProfanityMarker marker = ProfanityMarker.ASTERISK;
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("zh-Hant");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("shit this is fucking crazy"));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs, null, null, null, null, action, marker, null, null, null, null, null, null);
    assertEquals(1, results.size());
    assertEquals(1, results.get(0).getTranslations().size());
    assertEquals("en", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
    assertTrue(results.get(0).getTranslations().get(0).getText().contains("***"));
}
@Test
public void translateWithAlignment() {
    // includeAlignment=true: the response must carry source-to-target alignment projections.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("cs");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("It is a beautiful morning"));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs, null, null, null, null, null, null, true, null, null, null, null, null);
    assertEquals(1, results.size());
    assertEquals(1, results.get(0).getTranslations().size());
    assertEquals("en", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
    assertNotNull(results.get(0).getTranslations().get(0).getAlignment().getProj());
}
@Test
public void translateWithIncludeSentenceLength() {
    // includeSentenceLength=true: three source sentences yield three src/trans lengths.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("fr");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. Il suffit de copier et coller un extrait de code n'importe où."));
    final List<TranslatedTextItem> results = getTranslationClient().translate(toLanguages, inputs, null, null, null, null, null, null, null, true, null, null, null, null);
    assertEquals(1, results.size());
    assertEquals("fr", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
    assertEquals(1, results.get(0).getTranslations().size());
    assertEquals(3, results.get(0).getTranslations().get(0).getSentLen().getSrcSentLen().size());
    assertEquals(3, results.get(0).getTranslations().get(0).getSentLen().getTransSentLen().size());
}
@Test
public void translateWithCustomEndpoint() {
    // Same translate call, but through a client configured with a custom endpoint.
    final ArrayList<String> toLanguages = new ArrayList<>();
    toLanguages.add("cs");
    final ArrayList<InputTextItem> inputs = new ArrayList<>();
    inputs.add(new InputTextItem("It is a beautiful morning"));
    final List<TranslatedTextItem> results = getTranslationClientWithCustomEndpoint().translate(toLanguages, inputs);
    assertEquals(1, results.size());
    assertEquals("en", results.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, results.get(0).getDetectedLanguage().getScore());
    assertEquals(1, results.get(0).getTranslations().size());
    assertNotNull(results.get(0).getTranslations().get(0).getText());
}
/**
 * Verifies translation succeeds when the client authenticates with a token credential.
 * Fix: removed a duplicated {@code assertNotNull} on the translated text (it was asserted
 * both before and after the other checks); the remaining assertions are unchanged.
 */
@Test
public void translateWithToken() throws Exception {
    ArrayList<String> targetLanguages = new ArrayList<>();
    targetLanguages.add("cs");
    ArrayList<InputTextItem> content = new ArrayList<>();
    content.add(new InputTextItem("This is a test."));
    List<TranslatedTextItem> response = getTranslationClientWithToken().translate(targetLanguages, content);
    assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
    assertEquals(1, response.get(0).getDetectedLanguage().getScore());
    assertEquals(1, response.get(0).getTranslations().size());
    assertNotNull(response.get(0).getTranslations().get(0).getText());
}
} | class TranslateTests extends TextTranslationClientBase {
@Test
public void translateBasic() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("Hola mundo"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithAutoDetect() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("cs", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
/**
 * Verifies that a word wrapped in an {@code mstrans:dictionary} tag is rendered with the
 * supplied dictionary translation ("wordomatic") instead of being machine-translated.
 * Fix: removed a duplicated {@code @Test} annotation ({@code @Test} is not repeatable,
 * so declaring it twice does not compile).
 */
@Test
public void translateWithDictionaryTag() {
    ArrayList<String> targetLanguages = new ArrayList<>();
    targetLanguages.add("es");
    ArrayList<InputTextItem> content = new ArrayList<>();
    content.add(new InputTextItem("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry."));
    List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "en", null, null, null, null, null, null, null, null, null, null);
    assertEquals(1, response.get(0).getTranslations().size());
    assertEquals("es", response.get(0).getTranslations().get(0).getTo());
    assertTrue(response.get(0).getTranslations().get(0).getText().contains("wordomatic"));
}
@Test
public void translateWithTransliteration() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hans");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("hudha akhtabar."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "ar", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getSourceText().getText());
assertEquals("zh-Hans", response.get(0).getTranslations().get(0).getTo());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateFromLatinToLatinScript() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("ta");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("ap kaise ho"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, "hi", null, null, null, null, null, null, null, "Latn", "Latn", null);
assertNotNull(response.get(0).getTranslations().get(0).getTransliteration().getScript());
assertEquals("eppadi irukkiraai?", response.get(0).getTranslations().get(0).getTransliteration().getText());
}
@Test
public void translateWithMultipleInputTexts() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
content.add(new InputTextItem("Esto es una prueba."));
content.add(new InputTextItem("Dies ist ein Test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(3, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals("es", response.get(1).getDetectedLanguage().getLanguage());
assertEquals("de", response.get(2).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(1).getDetectedLanguage().getScore());
assertEquals(1, response.get(2).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(1).getTranslations().get(0).getText());
assertNotNull(response.get(2).getTranslations().get(0).getText());
}
@Test
public void translateMultipleTargetLanguages() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
targetLanguages.add("es");
targetLanguages.add("de");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals(3, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertNotNull(response.get(0).getTranslations().get(1).getText());
assertNotNull(response.get(0).getTranslations().get(2).getText());
}
@Test
public void translateDifferentTextTypes() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("<html><body>This <b>is</b> a test.</body></html>"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, TextType.HTML, null, null, null, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
}
@Test
public void translateWithProfanity() {
ProfanityAction profanityAction = ProfanityAction.MARKED;
ProfanityMarker profanityMarker = ProfanityMarker.ASTERISK;
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("zh-Hant");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("shit this is fucking crazy"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, profanityAction, profanityMarker, null, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertTrue(response.get(0).getTranslations().get(0).getText().contains("***"));
}
@Test
public void translateWithAlignment() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, true, null, null, null, null, null);
assertEquals(1, response.size());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertNotNull(response.get(0).getTranslations().get(0).getAlignment().getProj());
}
@Test
public void translateWithIncludeSentenceLength() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("fr");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. Il suffit de copier et coller un extrait de code n'importe où."));
List<TranslatedTextItem> response = getTranslationClient().translate(targetLanguages, content, null, null, null, null, null, null, null, true, null, null, null, null);
assertEquals(1, response.size());
assertEquals("fr", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getSrcSentLen().size());
assertEquals(3, response.get(0).getTranslations().get(0).getSentLen().getTransSentLen().size());
}
@Test
public void translateWithCustomEndpoint() {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("It is a beautiful morning"));
List<TranslatedTextItem> response = getTranslationClientWithCustomEndpoint().translate(targetLanguages, content);
assertEquals(1, response.size());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
@Test
public void translateWithToken() throws Exception {
ArrayList<String> targetLanguages = new ArrayList<>();
targetLanguages.add("cs");
ArrayList<InputTextItem> content = new ArrayList<>();
content.add(new InputTextItem("This is a test."));
List<TranslatedTextItem> response = getTranslationClientWithToken().translate(targetLanguages, content);
assertNotNull(response.get(0).getTranslations().get(0).getText());
assertEquals("en", response.get(0).getDetectedLanguage().getLanguage());
assertEquals(1, response.get(0).getDetectedLanguage().getScore());
assertEquals(1, response.get(0).getTranslations().size());
assertNotNull(response.get(0).getTranslations().get(0).getText());
}
} |
```suggestion Assertions.fail(e); ``` | public void shouldInvokeReleaserWhenPrefetchDisabled() {
final int prefetch = 0;
final int maxMessages = 5;
final Duration maxWaitTime = Duration.ofMillis(250);
final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
final ServiceBusReceivedMessage message0 = mock(ServiceBusReceivedMessage.class);
final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class);
final ArgumentCaptor<ServiceBusReceivedMessage> messageCaptor = ArgumentCaptor.forClass(ServiceBusReceivedMessage.class);
final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, prefetch, Duration.ZERO, false);
final Sinks.Many<ServiceBusReceivedMessage> upstream = Sinks.many().multicast().onBackpressureBuffer();
when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
when(asyncClient.isV2()).thenReturn(true);
when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
when(asyncClient.nonSessionSyncReceiveV2()).thenReturn(upstream.asFlux());
when(asyncClient.release(any())).thenReturn(Mono.empty());
final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
upstream.emitNext(message0, Sinks.EmitFailureHandler.FAIL_FAST);
final IterableStream<ServiceBusReceivedMessage> iterable = receiver.receive(maxMessages, maxWaitTime);
final List<ServiceBusReceivedMessage> list = iterable.stream().collect(Collectors.toList());
Assertions.assertEquals(1, list.size());
Assertions.assertEquals(message0, list.get(0));
final Sinks.EmitResult emitResult = upstream.tryEmitNext(message1);
Assertions.assertEquals(Sinks.EmitResult.OK, emitResult);
try {
Thread.sleep(500);
} catch (InterruptedException e) {
LOGGER.warning("Exception while wait. ", e);
}
verify(asyncClient).release(messageCaptor.capture());
verify(asyncClient, times(1)).release(any());
final ServiceBusReceivedMessage releasedMessage = messageCaptor.getValue();
Assertions.assertEquals(message1, releasedMessage);
} | LOGGER.warning("Exception while wait. ", e); | public void shouldInvokeReleaserWhenPrefetchDisabled() {
final int prefetch = 0;
final int maxMessages = 5;
final Duration maxWaitTime = Duration.ofMillis(250);
final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
final ServiceBusReceivedMessage message0 = mock(ServiceBusReceivedMessage.class);
final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class);
final ArgumentCaptor<ServiceBusReceivedMessage> messageCaptor = ArgumentCaptor.forClass(ServiceBusReceivedMessage.class);
final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, prefetch, Duration.ZERO, false);
final Sinks.Many<ServiceBusReceivedMessage> upstream = Sinks.many().multicast().onBackpressureBuffer();
when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
when(asyncClient.isV2()).thenReturn(true);
when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
when(asyncClient.nonSessionSyncReceiveV2()).thenReturn(upstream.asFlux());
when(asyncClient.release(any())).thenReturn(Mono.empty());
final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
upstream.emitNext(message0, Sinks.EmitFailureHandler.FAIL_FAST);
final IterableStream<ServiceBusReceivedMessage> iterable = receiver.receive(maxMessages, maxWaitTime);
final List<ServiceBusReceivedMessage> list = iterable.stream().collect(Collectors.toList());
Assertions.assertEquals(1, list.size());
Assertions.assertEquals(message0, list.get(0));
final Sinks.EmitResult emitResult = upstream.tryEmitNext(message1);
Assertions.assertEquals(Sinks.EmitResult.OK, emitResult);
try {
Thread.sleep(500);
} catch (InterruptedException e) {
Assertions.fail(e);
}
verify(asyncClient).release(messageCaptor.capture());
verify(asyncClient, times(1)).release(any());
final ServiceBusReceivedMessage releasedMessage = messageCaptor.getValue();
Assertions.assertEquals(message1, releasedMessage);
} | class SynchronousReceiverTest {
// Namespace/entity-path are instrumentation labels only; no real Service Bus connection is made.
private static final String NAMESPACE = "namespace";
private static final String ENTITY_PATH = "entity-path";
private static final ClientLogger LOGGER = new ClientLogger(SynchronousReceiverTest.class);
// Instrumentation with null tracer/meter so the tests exercise receiver logic without telemetry.
private static final ServiceBusReceiverInstrumentation NO_INSTRUMENTATION = new ServiceBusReceiverInstrumentation(null, null,
NAMESPACE, ENTITY_PATH, null, ReceiverKind.SYNC_RECEIVER);
// Handle returned by MockitoAnnotations.openMocks(..); closed in teardown().
private AutoCloseable mocksCloseable;
@BeforeEach
void setup() {
// Initialize @Mock fields, retaining the handle so teardown() can release them.
mocksCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
void teardown() throws Exception {
// Clear inline mocks first so stubbings cannot leak into the next test.
Mockito.framework().clearInlineMock(this);
if (mocksCloseable != null) {
mocksCloseable.close();
}
}
@Test
public void shouldErrorIterableStreamIfDisposed() {
    // Draining a receive(..) iterable after dispose() must surface a "Disposed." error.
    final int maxMessages = 1;
    final Duration maxWaitTime = Duration.ofMillis(500);
    final ServiceBusReceiverAsyncClient client = mock(ServiceBusReceiverAsyncClient.class);
    final ReceiverOptions options = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, 0, Duration.ZERO, false);
    when(client.getReceiverOptions()).thenReturn(options);
    when(client.isV2()).thenReturn(true);
    when(client.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
    final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, client);
    receiver.dispose();
    final RuntimeException thrown = Assertions.assertThrows(RuntimeException.class,
        () -> receiver.receive(maxMessages, maxWaitTime).stream().collect(Collectors.toList()));
    Assertions.assertNotNull(thrown.getCause());
    Assertions.assertEquals("Disposed.", thrown.getCause().getMessage());
}
@Test
public void shouldSubscribeToUpstreamOnlyOnce() {
    // Two receive(..) iterables drained back-to-back must share one upstream subscription.
    final int maxMessages = 1;
    final Duration maxWaitTime = Duration.ofMillis(250);
    final ServiceBusReceiverAsyncClient client = mock(ServiceBusReceiverAsyncClient.class);
    final ReceiverOptions options = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, 0, Duration.ZERO, false);
    final TestPublisher<ServiceBusReceivedMessage> upstream = TestPublisher.create();
    when(client.getReceiverOptions()).thenReturn(options);
    when(client.isV2()).thenReturn(true);
    when(client.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
    when(client.nonSessionSyncReceiveV2()).thenReturn(upstream.flux());
    final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, client);
    final IterableStream<ServiceBusReceivedMessage> firstIterable = receiver.receive(maxMessages, maxWaitTime);
    final IterableStream<ServiceBusReceivedMessage> secondIterable = receiver.receive(maxMessages, maxWaitTime);
    firstIterable.stream().collect(Collectors.toList());
    secondIterable.stream().collect(Collectors.toList());
    upstream.assertSubscribers(1);
}
/**
 * When prefetch is enabled the receiver owns prefetched messages, so a message arriving
 * after the sync receive completes must NOT be released back to the service.
 * Fixes: removed a duplicated {@code @Test} annotation (not repeatable; compile error), and
 * closed a race — the original verified {@code release} immediately after emitting, leaving
 * no window for an (incorrect) async release to occur; now waits briefly before verifying.
 */
@Test
public void shouldNotInvokeReleaserWhenPrefetchEnabled() {
    final int prefetch = 1;
    final int maxMessages = 5;
    final Duration maxWaitTime = Duration.ofMillis(250);
    final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
    final ServiceBusReceivedMessage message0 = mock(ServiceBusReceivedMessage.class);
    final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class);
    final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, prefetch, Duration.ZERO, false);
    final Sinks.Many<ServiceBusReceivedMessage> upstream = Sinks.many().multicast().onBackpressureBuffer();
    when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
    when(asyncClient.isV2()).thenReturn(true);
    when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
    when(asyncClient.nonSessionSyncReceiveV2()).thenReturn(upstream.asFlux());
    when(asyncClient.release(any())).thenReturn(Mono.empty());
    final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
    upstream.emitNext(message0, Sinks.EmitFailureHandler.FAIL_FAST);
    final IterableStream<ServiceBusReceivedMessage> iterable = receiver.receive(maxMessages, maxWaitTime);
    final List<ServiceBusReceivedMessage> list = iterable.stream().collect(Collectors.toList());
    Assertions.assertEquals(1, list.size());
    Assertions.assertEquals(message0, list.get(0));
    // Emit after the sync receive finished; tryEmitNext lets us assert the emission succeeded.
    final Sinks.EmitResult emitResult = upstream.tryEmitNext(message1);
    Assertions.assertEquals(Sinks.EmitResult.OK, emitResult);
    try {
        // Give any (erroneous) asynchronous release a window to fire before verifying.
        Thread.sleep(250);
    } catch (InterruptedException e) {
        Assertions.fail(e);
    }
    verify(asyncClient, times(0)).release(any());
}
@Test
public void shouldCancelUpstreamOnDispose() {
    // Disposing the receiver must propagate a cancel signal to the shared upstream flux.
    final int prefetch = 0;
    final int maxMessages = 2;
    final Duration maxWaitTime = Duration.ofMillis(250);
    final ServiceBusReceiverAsyncClient client = mock(ServiceBusReceiverAsyncClient.class);
    final ServiceBusReceivedMessage firstMessage = mock(ServiceBusReceivedMessage.class);
    final ServiceBusReceivedMessage secondMessage = mock(ServiceBusReceivedMessage.class);
    final ReceiverOptions options = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, prefetch, Duration.ZERO, false);
    final Sinks.Many<ServiceBusReceivedMessage> upstream = Sinks.many().multicast().onBackpressureBuffer();
    final AtomicBoolean canceled = new AtomicBoolean(false);
    when(client.getReceiverOptions()).thenReturn(options);
    when(client.isV2()).thenReturn(true);
    when(client.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
    when(client.nonSessionSyncReceiveV2()).thenReturn(upstream.asFlux().doOnCancel(() -> canceled.set(true)));
    final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, client);
    upstream.emitNext(firstMessage, Sinks.EmitFailureHandler.FAIL_FAST);
    upstream.emitNext(secondMessage, Sinks.EmitFailureHandler.FAIL_FAST);
    final IterableStream<ServiceBusReceivedMessage> iterable = receiver.receive(maxMessages, maxWaitTime);
    final List<ServiceBusReceivedMessage> received = iterable.stream().collect(Collectors.toList());
    Assertions.assertEquals(2, received.size());
    Assertions.assertEquals(Arrays.asList(firstMessage, secondMessage), received);
    // Draining the iterable alone must NOT cancel; only dispose() should.
    Assertions.assertFalse(canceled.get());
    receiver.dispose();
    Assertions.assertTrue(canceled.get());
}
} | class SynchronousReceiverTest {
private static final String NAMESPACE = "namespace";
private static final String ENTITY_PATH = "entity-path";
private static final ClientLogger LOGGER = new ClientLogger(SynchronousReceiverTest.class);
private static final ServiceBusReceiverInstrumentation NO_INSTRUMENTATION = new ServiceBusReceiverInstrumentation(null, null,
NAMESPACE, ENTITY_PATH, null, ReceiverKind.SYNC_RECEIVER);
private AutoCloseable mocksCloseable;
@BeforeEach
void setup() {
mocksCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
void teardown() throws Exception {
Mockito.framework().clearInlineMock(this);
if (mocksCloseable != null) {
mocksCloseable.close();
}
}
@Test
public void shouldErrorIterableStreamIfDisposed() {
final int maxMessages = 1;
final Duration maxWaitTime = Duration.ofMillis(500);
final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, 0, Duration.ZERO, false);
when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
when(asyncClient.isV2()).thenReturn(true);
when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
receiver.dispose();
final RuntimeException e = Assertions.assertThrows(RuntimeException.class, () -> {
receiver.receive(maxMessages, maxWaitTime).stream().collect(Collectors.toList());
});
Assertions.assertNotNull(e.getCause());
Assertions.assertEquals("Disposed.", e.getCause().getMessage());
}
@Test
public void shouldSubscribeToUpstreamOnlyOnce() {
final int maxMessages = 1;
final Duration maxWaitTime = Duration.ofMillis(250);
final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, 0, Duration.ZERO, false);
final TestPublisher<ServiceBusReceivedMessage> upstream = TestPublisher.create();
when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
when(asyncClient.isV2()).thenReturn(true);
when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
when(asyncClient.nonSessionSyncReceiveV2()).thenReturn(upstream.flux());
final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
final IterableStream<ServiceBusReceivedMessage> iterable0 = receiver.receive(maxMessages, maxWaitTime);
final IterableStream<ServiceBusReceivedMessage> iterable1 = receiver.receive(maxMessages, maxWaitTime);
iterable0.stream().collect(Collectors.toList());
iterable1.stream().collect(Collectors.toList());
upstream.assertSubscribers(1);
}
/**
 * When prefetch is enabled the receiver owns prefetched messages, so a message arriving
 * after the sync receive completes must NOT be released back to the service.
 * Fix: removed a duplicated {@code @Test} annotation ({@code @Test} is not repeatable,
 * so declaring it twice does not compile).
 */
@Test
public void shouldNotInvokeReleaserWhenPrefetchEnabled() {
    final int prefetch = 1;
    final int maxMessages = 5;
    final Duration maxWaitTime = Duration.ofMillis(250);
    final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
    final ServiceBusReceivedMessage message0 = mock(ServiceBusReceivedMessage.class);
    final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class);
    final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, prefetch, Duration.ZERO, false);
    final Sinks.Many<ServiceBusReceivedMessage> upstream = Sinks.many().multicast().onBackpressureBuffer();
    when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
    when(asyncClient.isV2()).thenReturn(true);
    when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
    when(asyncClient.nonSessionSyncReceiveV2()).thenReturn(upstream.asFlux());
    when(asyncClient.release(any())).thenReturn(Mono.empty());
    final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
    upstream.emitNext(message0, Sinks.EmitFailureHandler.FAIL_FAST);
    final IterableStream<ServiceBusReceivedMessage> iterable = receiver.receive(maxMessages, maxWaitTime);
    final List<ServiceBusReceivedMessage> list = iterable.stream().collect(Collectors.toList());
    Assertions.assertEquals(1, list.size());
    Assertions.assertEquals(message0, list.get(0));
    final Sinks.EmitResult emitResult = upstream.tryEmitNext(message1);
    Assertions.assertEquals(Sinks.EmitResult.OK, emitResult);
    try {
        // Give any (erroneous) asynchronous release a window to fire before verifying.
        Thread.sleep(250);
    } catch (InterruptedException e) {
        Assertions.fail(e);
    }
    verify(asyncClient, times(0)).release(any());
}
@Test
public void shouldCancelUpstreamOnDispose() {
final int prefetch = 0;
final int maxMessages = 2;
final Duration maxWaitTime = Duration.ofMillis(250);
final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class);
final ServiceBusReceivedMessage message0 = mock(ServiceBusReceivedMessage.class);
final ServiceBusReceivedMessage message1 = mock(ServiceBusReceivedMessage.class);
final ReceiverOptions receiverOptions = ReceiverOptions.createNonSessionOptions(PEEK_LOCK, prefetch, Duration.ZERO, false);
final Sinks.Many<ServiceBusReceivedMessage> upstream = Sinks.many().multicast().onBackpressureBuffer();
final AtomicBoolean upstreamCanceled = new AtomicBoolean(false);
when((asyncClient.getReceiverOptions())).thenReturn(receiverOptions);
when(asyncClient.isV2()).thenReturn(true);
when(asyncClient.getInstrumentation()).thenReturn(NO_INSTRUMENTATION);
when(asyncClient.nonSessionSyncReceiveV2()).thenReturn(upstream.asFlux().doOnCancel(() -> upstreamCanceled.set(true)));
final SynchronousReceiver receiver = new SynchronousReceiver(LOGGER, asyncClient);
upstream.emitNext(message0, Sinks.EmitFailureHandler.FAIL_FAST);
upstream.emitNext(message1, Sinks.EmitFailureHandler.FAIL_FAST);
final IterableStream<ServiceBusReceivedMessage> iterable = receiver.receive(maxMessages, maxWaitTime);
final List<ServiceBusReceivedMessage> list = iterable.stream().collect(Collectors.toList());
Assertions.assertEquals(2, list.size());
Assertions.assertEquals(Arrays.asList(message0, message1), list);
Assertions.assertFalse(upstreamCanceled.get());
receiver.dispose();
Assertions.assertTrue(upstreamCanceled.get());
}
} |
❤️ | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
Mono<java.net.http.HttpRequest> jdkRequestMono = Mono.fromCallable(() -> toJdkHttpRequest(request, context));
if (eagerlyReadResponse || ignoreResponseBody) {
java.net.http.HttpResponse.BodyHandler<byte[]> bodyHandler = getResponseHandler(readTimeout,
java.net.http.HttpResponse.BodyHandlers::ofByteArray, ByteArrayTimeoutResponseSubscriber::new);
return jdkRequestMono
.flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, bodyHandler)))
.map(jdkResponse -> {
HttpHeaders headers = fromJdkHttpHeaders(jdkResponse.headers());
int statusCode = jdkResponse.statusCode();
return new JdkHttpResponseSync(request, statusCode, headers, jdkResponse.body());
});
} else {
return jdkRequestMono
.flatMap(jdkRequest -> Mono.fromCompletionStage(
jdkHttpClient.sendAsync(jdkRequest, java.net.http.HttpResponse.BodyHandlers.ofPublisher())))
.map(jdkResponse -> new JdkHttpResponseAsync(request, jdkResponse, readTimeout));
}
} | public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
Mono<java.net.http.HttpRequest> jdkRequestMono = Mono.fromCallable(() -> toJdkHttpRequest(request, context));
if (eagerlyReadResponse || ignoreResponseBody) {
java.net.http.HttpResponse.BodyHandler<byte[]> bodyHandler = getResponseHandler(hasReadTimeout, readTimeout,
java.net.http.HttpResponse.BodyHandlers::ofByteArray, ByteArrayTimeoutResponseSubscriber::new);
return jdkRequestMono
.flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, bodyHandler)))
.map(jdkResponse -> {
HttpHeaders headers = fromJdkHttpHeaders(jdkResponse.headers());
int statusCode = jdkResponse.statusCode();
return new JdkHttpResponseSync(request, statusCode, headers, jdkResponse.body());
});
} else {
return jdkRequestMono
.flatMap(jdkRequest -> Mono.fromCompletionStage(
jdkHttpClient.sendAsync(jdkRequest, java.net.http.HttpResponse.BodyHandlers.ofPublisher())))
.map(jdkResponse -> new JdkHttpResponseAsync(request, readTimeout, hasReadTimeout, jdkResponse));
}
} | class JdkHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(JdkHttpClient.class);
private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
private static final String AZURE_IGNORE_RESPONSE_BODY = "azure-ignore-response-body";
private final java.net.http.HttpClient jdkHttpClient;
private final Set<String> restrictedHeaders;
private final Duration writeTimeout;
private final Duration responseTimeout;
private final Duration readTimeout;
JdkHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders, Duration writeTimeout,
Duration responseTimeout, Duration readTimeout) {
this.jdkHttpClient = httpClient;
int javaVersion = getJavaVersion();
if (javaVersion <= 11) {
throw LOGGER.logExceptionAsError(
new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below."));
}
this.restrictedHeaders = restrictedHeaders;
LOGGER.verbose("Effective restricted headers: {}", restrictedHeaders);
this.writeTimeout = writeTimeout;
this.responseTimeout = responseTimeout;
this.readTimeout = readTimeout;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
java.net.http.HttpRequest jdkRequest = toJdkHttpRequest(request, context);
try {
if (eagerlyReadResponse || ignoreResponseBody) {
java.net.http.HttpResponse.BodyHandler<byte[]> bodyHandler = getResponseHandler(readTimeout,
java.net.http.HttpResponse.BodyHandlers::ofByteArray, ByteArrayTimeoutResponseSubscriber::new);
java.net.http.HttpResponse<byte[]> jdKResponse = jdkHttpClient.send(jdkRequest, bodyHandler);
return new JdkHttpResponseSync(request, jdKResponse.statusCode(),
fromJdkHttpHeaders(jdKResponse.headers()), jdKResponse.body());
} else {
java.net.http.HttpResponse.BodyHandler<InputStream> bodyHandler = getResponseHandler(readTimeout,
java.net.http.HttpResponse.BodyHandlers::ofInputStream, InputStreamTimeoutResponseSubscriber::new);
return new JdkHttpResponseSync(request, jdkHttpClient.send(jdkRequest, bodyHandler));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
} catch (InterruptedException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Converts the given azure-core request to the JDK HttpRequest type.
*
* @param request the azure-core request
* @return the HttpRequest
*/
private java.net.http.HttpRequest toJdkHttpRequest(HttpRequest request, Context context) {
return new AzureJdkHttpRequest(request, context, restrictedHeaders, LOGGER, writeTimeout, responseTimeout);
}
/**
* Get the java runtime major version.
*
* @return the java major version
*/
private static int getJavaVersion() {
return Runtime.version().feature();
}
/**
* Gets the response body handler based on whether a read timeout is configured.
* <p>
* When a read timeout is configured our custom handler is used that tracks the time taken between each read
* operation to pull the body from the network. If a timeout isn't configured the built-in JDK handler is used.
*
* @param readTimeout The configured read timeout.
* @param jdkBodyHandler The JDK body handler to use when no read timeout is configured.
* @param timeoutSubscriber The supplier for the custom body subscriber to use when a read timeout is configured.
* @return The response body handler to use.
* @param <T> The type of the response body.
*/
private static <T> java.net.http.HttpResponse.BodyHandler<T> getResponseHandler(Duration readTimeout,
Supplier<java.net.http.HttpResponse.BodyHandler<T>> jdkBodyHandler,
Function<Long, java.net.http.HttpResponse.BodySubscriber<T>> timeoutSubscriber) {
return (readTimeout != null && !readTimeout.isNegative() && !readTimeout.isZero())
? responseInfo -> timeoutSubscriber.apply(readTimeout.toMillis())
: jdkBodyHandler.get();
}
} | class JdkHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(JdkHttpClient.class);
private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
private static final String AZURE_IGNORE_RESPONSE_BODY = "azure-ignore-response-body";
private final java.net.http.HttpClient jdkHttpClient;
private final Set<String> restrictedHeaders;
private final Duration writeTimeout;
private final Duration responseTimeout;
private final Duration readTimeout;
private final boolean hasReadTimeout;
JdkHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders, Duration writeTimeout,
Duration responseTimeout, Duration readTimeout) {
this.jdkHttpClient = httpClient;
int javaVersion = getJavaVersion();
if (javaVersion <= 11) {
throw LOGGER.logExceptionAsError(
new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below."));
}
this.restrictedHeaders = restrictedHeaders;
LOGGER.verbose("Effective restricted headers: {}", restrictedHeaders);
this.writeTimeout
= (writeTimeout != null && !writeTimeout.isNegative() && !writeTimeout.isZero()) ? writeTimeout : null;
this.responseTimeout = (responseTimeout != null && !responseTimeout.isNegative() && !responseTimeout.isZero())
? responseTimeout
: null;
this.readTimeout = readTimeout;
this.hasReadTimeout = readTimeout != null && !readTimeout.isNegative() && !readTimeout.isZero();
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
java.net.http.HttpRequest jdkRequest = toJdkHttpRequest(request, context);
try {
if (eagerlyReadResponse || ignoreResponseBody) {
java.net.http.HttpResponse.BodyHandler<byte[]> bodyHandler
= getResponseHandler(hasReadTimeout, readTimeout,
java.net.http.HttpResponse.BodyHandlers::ofByteArray, ByteArrayTimeoutResponseSubscriber::new);
java.net.http.HttpResponse<byte[]> jdKResponse = jdkHttpClient.send(jdkRequest, bodyHandler);
return new JdkHttpResponseSync(request, jdKResponse.statusCode(),
fromJdkHttpHeaders(jdKResponse.headers()), jdKResponse.body());
} else {
java.net.http.HttpResponse.BodyHandler<InputStream> bodyHandler = getResponseHandler(hasReadTimeout,
readTimeout, java.net.http.HttpResponse.BodyHandlers::ofInputStream,
InputStreamTimeoutResponseSubscriber::new);
return new JdkHttpResponseSync(request, jdkHttpClient.send(jdkRequest, bodyHandler));
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
} catch (InterruptedException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Converts the given azure-core request to the JDK HttpRequest type.
*
* @param request the azure-core request
* @return the HttpRequest
*/
private java.net.http.HttpRequest toJdkHttpRequest(HttpRequest request, Context context) {
return new AzureJdkHttpRequest(request, context, restrictedHeaders, LOGGER, writeTimeout, responseTimeout);
}
/**
* Get the java runtime major version.
*
* @return the java major version
*/
private static int getJavaVersion() {
return Runtime.version().feature();
}
/**
* Gets the response body handler based on whether a read timeout is configured.
* <p>
* When a read timeout is configured our custom handler is used that tracks the time taken between each read
* operation to pull the body from the network. If a timeout isn't configured the built-in JDK handler is used.
*
* @param hasReadTimeout Flag indicating if a read timeout is configured.
* @param readTimeout The configured read timeout.
* @param jdkBodyHandler The JDK body handler to use when no read timeout is configured.
* @param timeoutSubscriber The supplier for the custom body subscriber to use when a read timeout is configured.
* @return The response body handler to use.
* @param <T> The type of the response body.
*/
private static <T> java.net.http.HttpResponse.BodyHandler<T> getResponseHandler(boolean hasReadTimeout,
Duration readTimeout, Supplier<java.net.http.HttpResponse.BodyHandler<T>> jdkBodyHandler,
Function<Long, java.net.http.HttpResponse.BodySubscriber<T>> timeoutSubscriber) {
return hasReadTimeout ? responseInfo -> timeoutSubscriber.apply(readTimeout.toMillis()) : jdkBodyHandler.get();
}
} | |
nit ;) ```suggestion LOGGER.atVerbose() .addKeyValue("successCount", successCount.incrementAndGet()) .log("Download complete"); ``` | public void downloadToFileWithFaultInjection() throws IOException, InterruptedException {
byte[] realFileBytes = new byte[9 * Constants.MB - 1];
ThreadLocalRandom.current().nextBytes(realFileBytes);
containerClient.getBlobClient(containerClient.getBlobContainerName())
.upload(BinaryData.fromBytes(realFileBytes), true);
BlobClient downloadClient = new BlobClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.containerName(containerClient.getBlobContainerName())
.blobName(containerClient.getBlobContainerName())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.httpClient(new HttpFaultInjectingHttpClient(getFaultInjectingWrappedHttpClient()))
.retryOptions(new RequestRetryOptions(RetryPolicyType.FIXED, 4, null, 10L, 10L, null))
.buildClient();
List<File> files = new ArrayList<>(500);
for (int i = 0; i < 500; i++) {
File file = File.createTempFile(CoreUtils.randomUuid().toString() + i, ".txt");
file.deleteOnExit();
files.add(file);
}
AtomicInteger successCount = new AtomicInteger();
Set<OpenOption> overwriteOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING,
StandardOpenOption.READ, StandardOpenOption.WRITE));
ExecutorService executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
executorService.invokeAll(files.stream().map(it -> (Callable<Void>) () -> {
try {
downloadClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(it.getAbsolutePath())
.setOpenOptions(overwriteOptions)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxConcurrency(2)),
null, Context.NONE);
byte[] actualFileBytes = Files.readAllBytes(it.toPath());
TestUtils.assertArraysEqual(realFileBytes, actualFileBytes);
LOGGER.atVerbose().log(() -> "Successful complete count: " + successCount.incrementAndGet());
Files.deleteIfExists(it.toPath());
} catch (Exception ex) {
LOGGER.atWarning()
.log(() -> "Failed to complete download, target download file: " + it.getAbsolutePath(), ex);
}
return null;
}).collect(Collectors.toList()));
executorService.shutdown();
executorService.awaitTermination(10, TimeUnit.MINUTES);
assertTrue(successCount.get() >= 450);
files.forEach(it -> {
try {
Files.deleteIfExists(it.toPath());
} catch (IOException e) {
LOGGER.atWarning().log(() -> "Failed to delete file: " + it.getAbsolutePath(), e);
}
});
} | LOGGER.atVerbose().log(() -> "Successful complete count: " + successCount.incrementAndGet()); | public void downloadToFileWithFaultInjection() throws IOException, InterruptedException {
byte[] realFileBytes = new byte[9 * Constants.MB - 1];
ThreadLocalRandom.current().nextBytes(realFileBytes);
containerClient.getBlobClient(containerClient.getBlobContainerName())
.upload(BinaryData.fromBytes(realFileBytes), true);
BlobClient downloadClient = new BlobClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.containerName(containerClient.getBlobContainerName())
.blobName(containerClient.getBlobContainerName())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.httpClient(new HttpFaultInjectingHttpClient(getFaultInjectingWrappedHttpClient()))
.retryOptions(new RequestRetryOptions(RetryPolicyType.FIXED, 4, null, 10L, 10L, null))
.buildClient();
List<File> files = new ArrayList<>(500);
for (int i = 0; i < 500; i++) {
File file = File.createTempFile(CoreUtils.randomUuid().toString() + i, ".txt");
file.deleteOnExit();
files.add(file);
}
AtomicInteger successCount = new AtomicInteger();
Set<OpenOption> overwriteOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING,
StandardOpenOption.READ, StandardOpenOption.WRITE));
ExecutorService executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
executorService.invokeAll(files.stream().map(it -> (Callable<Void>) () -> {
try {
downloadClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(it.getAbsolutePath())
.setOpenOptions(overwriteOptions)
.setParallelTransferOptions(new ParallelTransferOptions().setMaxConcurrency(2)),
null, Context.NONE);
byte[] actualFileBytes = Files.readAllBytes(it.toPath());
TestUtils.assertArraysEqual(realFileBytes, actualFileBytes);
LOGGER.atVerbose()
.addKeyValue("successCount", successCount.incrementAndGet())
.log("Download completed successfully.");
Files.deleteIfExists(it.toPath());
} catch (Exception ex) {
LOGGER.atWarning()
.addKeyValue("downloadFile", it.getAbsolutePath())
.log("Failed to complete download.", ex);
}
return null;
}).collect(Collectors.toList()));
executorService.shutdown();
executorService.awaitTermination(10, TimeUnit.MINUTES);
assertTrue(successCount.get() >= 450);
files.forEach(it -> {
try {
Files.deleteIfExists(it.toPath());
} catch (IOException e) {
LOGGER.atWarning()
.addKeyValue("file", it.getAbsolutePath())
.log("Failed to delete file.", e);
}
});
} | class HttpFaultInjectingTests {
private static final ClientLogger LOGGER = new ClientLogger(HttpFaultInjectingTests.class);
private static final HttpHeaderName UPSTREAM_URI_HEADER = HttpHeaderName.fromString("X-Upstream-Base-Uri");
private static final HttpHeaderName HTTP_FAULT_INJECTOR_RESPONSE_HEADER
= HttpHeaderName.fromString("x-ms-faultinjector-response-option");
private BlobContainerClient containerClient;
@BeforeEach
public void setup() {
String testName = ("httpFaultInjectingTests" + CoreUtils.randomUuid().toString().replace("-", ""))
.toLowerCase();
containerClient = new BlobServiceClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.httpClient(BlobTestBase.getHttpClient(() -> {
throw new RuntimeException("Test should not run during playback.");
}))
.buildClient()
.createBlobContainer(testName);
}
@AfterEach
public void teardown() {
if (containerClient != null) {
containerClient.delete();
}
}
/**
* Tests downloading to file with fault injection.
* <p>
* This test will upload a single blob of about 9MB and then download it in parallel 500 times. Each download will
* have its file contents compared to the original blob data. The test only cares about files that were properly
* downloaded, if a download fails with a network error it will be ignored. A requirement of 90% of files being
* successfully downloaded is also a requirement to prevent a case where most files failed to download and passing,
* hiding a true issue.
*/
@Test
@SuppressWarnings("unchecked")
private HttpClient getFaultInjectingWrappedHttpClient() {
switch (ENVIRONMENT.getHttpClientType()) {
case NETTY:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(NettyAsyncHttpClientProvider.class));
case OK_HTTP:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(OkHttpAsyncClientProvider.class));
case VERTX:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(VertxAsyncHttpClientProvider.class));
case JDK_HTTP:
try {
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider((Class<? extends HttpClientProvider>) Class.forName(
"com.azure.core.http.jdk.httpclient.JdkHttpClientProvider")));
} catch (ClassNotFoundException e) {
throw new IllegalStateException(e);
}
default:
throw new IllegalArgumentException("Unknown http client type: " + ENVIRONMENT.getHttpClientType());
}
}
private static final class HttpFaultInjectingHttpClient implements HttpClient {
private final HttpClient wrappedHttpClient;
HttpFaultInjectingHttpClient(HttpClient wrappedHttpClient) {
this.wrappedHttpClient = wrappedHttpClient;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
public Mono<HttpResponse> send(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
return wrappedHttpClient.send(request, context)
.map(response -> {
HttpRequest request1 = response.getRequest();
request1.getHeaders().remove(UPSTREAM_URI_HEADER);
request1.setUrl(originalUrl);
return response;
});
}
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
HttpResponse response = wrappedHttpClient.sendSync(request, context);
response.getRequest().setUrl(originalUrl);
response.getRequest().getHeaders().remove(UPSTREAM_URI_HEADER);
return response;
}
private static URL rewriteUrl(URL originalUrl) {
try {
return UrlBuilder.parse(originalUrl)
.setScheme("http")
.setHost("localhost")
.setPort(7777)
.toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static String faultInjectorHandling() {
double random = ThreadLocalRandom.current().nextDouble();
int choice = (int) (random * 100);
if (choice >= 25) {
return "f";
} else if (choice >= 1) {
if (random <= 0.34D) {
return "n";
} else if (random <= 0.67D) {
return "nc";
} else {
return "na";
}
} else {
if (random <= 0.25D) {
return "p";
} else if (random <= 0.50D) {
return "pc";
} else if (random <= 0.75D) {
return "pa";
} else {
return "pn";
}
}
}
}
private static boolean shouldRun() {
String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT);
return ENVIRONMENT.getTestMode() == TestMode.LIVE
&& !osName.contains("mac os")
&& !osName.contains("darwin");
}
} | class HttpFaultInjectingTests {
private static final ClientLogger LOGGER = new ClientLogger(HttpFaultInjectingTests.class);
private static final HttpHeaderName UPSTREAM_URI_HEADER = HttpHeaderName.fromString("X-Upstream-Base-Uri");
private static final HttpHeaderName HTTP_FAULT_INJECTOR_RESPONSE_HEADER
= HttpHeaderName.fromString("x-ms-faultinjector-response-option");
private BlobContainerClient containerClient;
@BeforeEach
public void setup() {
String testName = ("httpFaultInjectingTests" + CoreUtils.randomUuid().toString().replace("-", ""))
.toLowerCase();
containerClient = new BlobServiceClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.httpClient(BlobTestBase.getHttpClient(() -> {
throw new RuntimeException("Test should not run during playback.");
}))
.buildClient()
.createBlobContainer(testName);
}
@AfterEach
public void teardown() {
if (containerClient != null) {
containerClient.delete();
}
}
/**
* Tests downloading to file with fault injection.
* <p>
* This test will upload a single blob of about 9MB and then download it in parallel 500 times. Each download will
* have its file contents compared to the original blob data. The test only cares about files that were properly
* downloaded, if a download fails with a network error it will be ignored. A requirement of 90% of files being
* successfully downloaded is also a requirement to prevent a case where most files failed to download and passing,
* hiding a true issue.
*/
@Test
@SuppressWarnings("unchecked")
private HttpClient getFaultInjectingWrappedHttpClient() {
switch (ENVIRONMENT.getHttpClientType()) {
case NETTY:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(NettyAsyncHttpClientProvider.class));
case OK_HTTP:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(OkHttpAsyncClientProvider.class));
case VERTX:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(VertxAsyncHttpClientProvider.class));
case JDK_HTTP:
try {
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider((Class<? extends HttpClientProvider>) Class.forName(
"com.azure.core.http.jdk.httpclient.JdkHttpClientProvider")));
} catch (ClassNotFoundException e) {
throw new IllegalStateException(e);
}
default:
throw new IllegalArgumentException("Unknown http client type: " + ENVIRONMENT.getHttpClientType());
}
}
private static final class HttpFaultInjectingHttpClient implements HttpClient {
private final HttpClient wrappedHttpClient;
HttpFaultInjectingHttpClient(HttpClient wrappedHttpClient) {
this.wrappedHttpClient = wrappedHttpClient;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
public Mono<HttpResponse> send(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
return wrappedHttpClient.send(request, context)
.map(response -> {
HttpRequest request1 = response.getRequest();
request1.getHeaders().remove(UPSTREAM_URI_HEADER);
request1.setUrl(originalUrl);
return response;
});
}
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
HttpResponse response = wrappedHttpClient.sendSync(request, context);
response.getRequest().setUrl(originalUrl);
response.getRequest().getHeaders().remove(UPSTREAM_URI_HEADER);
return response;
}
private static URL rewriteUrl(URL originalUrl) {
try {
return UrlBuilder.parse(originalUrl)
.setScheme("http")
.setHost("localhost")
.setPort(7777)
.toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static String faultInjectorHandling() {
double random = ThreadLocalRandom.current().nextDouble();
int choice = (int) (random * 100);
if (choice >= 25) {
return "f";
} else if (choice >= 1) {
if (random <= 0.34D) {
return "n";
} else if (random <= 0.67D) {
return "nc";
} else {
return "na";
}
} else {
if (random <= 0.25D) {
return "p";
} else if (random <= 0.50D) {
return "pc";
} else if (random <= 0.75D) {
return "pa";
} else {
return "pn";
}
}
}
}
// Gate: these tests only run LIVE and never on macOS agents.
// NOTE(review): macOS is excluded — presumably the fault-injector proxy is unavailable
// there; confirm before changing.
private static boolean shouldRun() {
    String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    return ENVIRONMENT.getTestMode() == TestMode.LIVE
        && !osName.contains("mac os")
        && !osName.contains("darwin");
}
} |
why not? doesn't really matter in tests, but want to check if I miss something | public void downloadToFileWithFaultInjection() throws IOException, InterruptedException {
// ~9 MB of random data so each download is split into multiple chunks.
byte[] realFileBytes = new byte[9 * Constants.MB - 1];
ThreadLocalRandom.current().nextBytes(realFileBytes);
containerClient.getBlobClient(containerClient.getBlobContainerName())
    .upload(BinaryData.fromBytes(realFileBytes), true);
// Client routed through the fault-injecting HttpClient, with short fixed retries so
// injected faults are retried quickly instead of stalling the test.
BlobClient downloadClient = new BlobClientBuilder()
    .endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
    .containerName(containerClient.getBlobContainerName())
    .blobName(containerClient.getBlobContainerName())
    .credential(ENVIRONMENT.getPrimaryAccount().getCredential())
    .httpClient(new HttpFaultInjectingHttpClient(getFaultInjectingWrappedHttpClient()))
    .retryOptions(new RequestRetryOptions(RetryPolicyType.FIXED, 4, null, 10L, 10L, null))
    .buildClient();
// 500 distinct target files, one per parallel download attempt.
List<File> files = new ArrayList<>(500);
for (int i = 0; i < 500; i++) {
    File file = File.createTempFile(CoreUtils.randomUuid().toString() + i, ".txt");
    file.deleteOnExit();
    files.add(file);
}
AtomicInteger successCount = new AtomicInteger();
Set<OpenOption> overwriteOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
    StandardOpenOption.TRUNCATE_EXISTING,
    StandardOpenOption.READ, StandardOpenOption.WRITE));
ExecutorService executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
executorService.invokeAll(files.stream().map(it -> (Callable<Void>) () -> {
    try {
        downloadClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(it.getAbsolutePath())
            .setOpenOptions(overwriteOptions)
            .setParallelTransferOptions(new ParallelTransferOptions().setMaxConcurrency(2)),
            null, Context.NONE);
        // Verify the downloaded bytes match the uploaded blob exactly.
        byte[] actualFileBytes = Files.readAllBytes(it.toPath());
        TestUtils.assertArraysEqual(realFileBytes, actualFileBytes);
        LOGGER.atVerbose().log(() -> "Successful complete count: " + successCount.incrementAndGet());
        Files.deleteIfExists(it.toPath());
    } catch (Exception ex) {
        // Network failures caused by injected faults are tolerated; only the overall
        // success ratio below is asserted.
        LOGGER.atWarning()
            .log(() -> "Failed to complete download, target download file: " + it.getAbsolutePath(), ex);
    }
    return null;
}).collect(Collectors.toList()));
executorService.shutdown();
executorService.awaitTermination(10, TimeUnit.MINUTES);
// Require at least 90% (450/500) of downloads to succeed.
assertTrue(successCount.get() >= 450);
// Best-effort cleanup of any remaining temp files.
files.forEach(it -> {
    try {
        Files.deleteIfExists(it.toPath());
    } catch (IOException e) {
        LOGGER.atWarning().log(() -> "Failed to delete file: " + it.getAbsolutePath(), e);
    }
});
} | LOGGER.atVerbose().log(() -> "Successful complete count: " + successCount.incrementAndGet()); | public void downloadToFileWithFaultInjection() throws IOException, InterruptedException {
// ~9 MB of random data so each download is split into multiple chunks.
byte[] realFileBytes = new byte[9 * Constants.MB - 1];
ThreadLocalRandom.current().nextBytes(realFileBytes);
containerClient.getBlobClient(containerClient.getBlobContainerName())
    .upload(BinaryData.fromBytes(realFileBytes), true);
// Client routed through the fault-injecting HttpClient, with short fixed retries so
// injected faults are retried quickly instead of stalling the test.
BlobClient downloadClient = new BlobClientBuilder()
    .endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
    .containerName(containerClient.getBlobContainerName())
    .blobName(containerClient.getBlobContainerName())
    .credential(ENVIRONMENT.getPrimaryAccount().getCredential())
    .httpClient(new HttpFaultInjectingHttpClient(getFaultInjectingWrappedHttpClient()))
    .retryOptions(new RequestRetryOptions(RetryPolicyType.FIXED, 4, null, 10L, 10L, null))
    .buildClient();
// 500 distinct target files, one per parallel download attempt.
List<File> files = new ArrayList<>(500);
for (int i = 0; i < 500; i++) {
    File file = File.createTempFile(CoreUtils.randomUuid().toString() + i, ".txt");
    file.deleteOnExit();
    files.add(file);
}
AtomicInteger successCount = new AtomicInteger();
Set<OpenOption> overwriteOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
    StandardOpenOption.TRUNCATE_EXISTING,
    StandardOpenOption.READ, StandardOpenOption.WRITE));
ExecutorService executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
executorService.invokeAll(files.stream().map(it -> (Callable<Void>) () -> {
    try {
        downloadClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(it.getAbsolutePath())
            .setOpenOptions(overwriteOptions)
            .setParallelTransferOptions(new ParallelTransferOptions().setMaxConcurrency(2)),
            null, Context.NONE);
        // Verify the downloaded bytes match the uploaded blob exactly.
        byte[] actualFileBytes = Files.readAllBytes(it.toPath());
        TestUtils.assertArraysEqual(realFileBytes, actualFileBytes);
        LOGGER.atVerbose()
            .addKeyValue("successCount", successCount.incrementAndGet())
            .log("Download completed successfully.");
        Files.deleteIfExists(it.toPath());
    } catch (Exception ex) {
        // Network failures caused by injected faults are tolerated; only the overall
        // success ratio below is asserted.
        LOGGER.atWarning()
            .addKeyValue("downloadFile", it.getAbsolutePath())
            .log("Failed to complete download.", ex);
    }
    return null;
}).collect(Collectors.toList()));
executorService.shutdown();
executorService.awaitTermination(10, TimeUnit.MINUTES);
// Require at least 90% (450/500) of downloads to succeed.
assertTrue(successCount.get() >= 450);
// Best-effort cleanup of any remaining temp files.
files.forEach(it -> {
    try {
        Files.deleteIfExists(it.toPath());
    } catch (IOException e) {
        LOGGER.atWarning()
            .addKeyValue("file", it.getAbsolutePath())
            .log("Failed to delete file.", e);
    }
});
} | class HttpFaultInjectingTests {
private static final ClientLogger LOGGER = new ClientLogger(HttpFaultInjectingTests.class);
private static final HttpHeaderName UPSTREAM_URI_HEADER = HttpHeaderName.fromString("X-Upstream-Base-Uri");
private static final HttpHeaderName HTTP_FAULT_INJECTOR_RESPONSE_HEADER
= HttpHeaderName.fromString("x-ms-faultinjector-response-option");
private BlobContainerClient containerClient;
@BeforeEach
public void setup() {
    // Unique per-run container name; lowercased — presumably to satisfy blob container
    // naming rules (TODO confirm).
    String testName = ("httpFaultInjectingTests" + CoreUtils.randomUuid().toString().replace("-", ""))
        .toLowerCase();
    // The setup client deliberately fails in playback mode: this suite is LIVE-only.
    containerClient = new BlobServiceClientBuilder()
        .endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
        .credential(ENVIRONMENT.getPrimaryAccount().getCredential())
        .httpClient(BlobTestBase.getHttpClient(() -> {
            throw new RuntimeException("Test should not run during playback.");
        }))
        .buildClient()
        .createBlobContainer(testName);
}
@AfterEach
public void teardown() {
    // Remove the per-test container; guard against setup having failed before creation.
    if (containerClient != null) {
        containerClient.delete();
    }
}
/**
* Tests downloading to file with fault injection.
* <p>
* This test will upload a single blob of about 9MB and then download it in parallel 500 times. Each download will
* have its file contents compared to the original blob data. The test only cares about files that were properly
 * downloaded; if a download fails with a network error it is ignored. Additionally, at least 90% of the files must
 * download successfully, which prevents the test from passing when most downloads fail and thereby hiding a true
 * issue.
*/
@Test
@SuppressWarnings("unchecked")
/**
 * Builds the concrete {@link HttpClient} matching the configured client type for the
 * fault-injecting client to wrap. All client types share the same short read/response
 * timeouts so injected hangs surface quickly.
 *
 * @return the HttpClient implementation to wrap.
 * @throws IllegalArgumentException if the configured HTTP client type is unknown.
 * @throws IllegalStateException if the JDK HttpClient provider class cannot be found.
 */
private HttpClient getFaultInjectingWrappedHttpClient() {
    // Common options hoisted out of the switch; only the provider differs per case.
    HttpClientOptions clientOptions = new HttpClientOptions()
        .readTimeout(Duration.ofSeconds(2))
        .responseTimeout(Duration.ofSeconds(2));
    switch (ENVIRONMENT.getHttpClientType()) {
        case NETTY:
            return HttpClient.createDefault(
                clientOptions.setHttpClientProvider(NettyAsyncHttpClientProvider.class));
        case OK_HTTP:
            return HttpClient.createDefault(
                clientOptions.setHttpClientProvider(OkHttpAsyncClientProvider.class));
        case VERTX:
            return HttpClient.createDefault(
                clientOptions.setHttpClientProvider(VertxAsyncHttpClientProvider.class));
        case JDK_HTTP:
            try {
                // Provider referenced by name — presumably to avoid a hard compile-time
                // dependency on the JDK httpclient module; confirm before changing.
                return HttpClient.createDefault(clientOptions.setHttpClientProvider(
                    (Class<? extends HttpClientProvider>) Class.forName(
                        "com.azure.core.http.jdk.httpclient.JdkHttpClientProvider")));
            } catch (ClassNotFoundException e) {
                throw new IllegalStateException(e);
            }
        default:
            throw new IllegalArgumentException("Unknown http client type: " + ENVIRONMENT.getHttpClientType());
    }
}
private static final class HttpFaultInjectingHttpClient implements HttpClient {
private final HttpClient wrappedHttpClient;
HttpFaultInjectingHttpClient(HttpClient wrappedHttpClient) {
this.wrappedHttpClient = wrappedHttpClient;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
public Mono<HttpResponse> send(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
return wrappedHttpClient.send(request, context)
.map(response -> {
HttpRequest request1 = response.getRequest();
request1.getHeaders().remove(UPSTREAM_URI_HEADER);
request1.setUrl(originalUrl);
return response;
});
}
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
HttpResponse response = wrappedHttpClient.sendSync(request, context);
response.getRequest().setUrl(originalUrl);
response.getRequest().getHeaders().remove(UPSTREAM_URI_HEADER);
return response;
}
private static URL rewriteUrl(URL originalUrl) {
try {
return UrlBuilder.parse(originalUrl)
.setScheme("http")
.setHost("localhost")
.setPort(7777)
.toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static String faultInjectorHandling() {
double random = ThreadLocalRandom.current().nextDouble();
int choice = (int) (random * 100);
if (choice >= 25) {
return "f";
} else if (choice >= 1) {
if (random <= 0.34D) {
return "n";
} else if (random <= 0.67D) {
return "nc";
} else {
return "na";
}
} else {
if (random <= 0.25D) {
return "p";
} else if (random <= 0.50D) {
return "pc";
} else if (random <= 0.75D) {
return "pa";
} else {
return "pn";
}
}
}
}
private static boolean shouldRun() {
String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT);
return ENVIRONMENT.getTestMode() == TestMode.LIVE
&& !osName.contains("mac os")
&& !osName.contains("darwin");
}
} | class HttpFaultInjectingTests {
private static final ClientLogger LOGGER = new ClientLogger(HttpFaultInjectingTests.class);
private static final HttpHeaderName UPSTREAM_URI_HEADER = HttpHeaderName.fromString("X-Upstream-Base-Uri");
private static final HttpHeaderName HTTP_FAULT_INJECTOR_RESPONSE_HEADER
= HttpHeaderName.fromString("x-ms-faultinjector-response-option");
private BlobContainerClient containerClient;
@BeforeEach
public void setup() {
String testName = ("httpFaultInjectingTests" + CoreUtils.randomUuid().toString().replace("-", ""))
.toLowerCase();
containerClient = new BlobServiceClientBuilder()
.endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
.credential(ENVIRONMENT.getPrimaryAccount().getCredential())
.httpClient(BlobTestBase.getHttpClient(() -> {
throw new RuntimeException("Test should not run during playback.");
}))
.buildClient()
.createBlobContainer(testName);
}
@AfterEach
public void teardown() {
if (containerClient != null) {
containerClient.delete();
}
}
/**
* Tests downloading to file with fault injection.
* <p>
* This test will upload a single blob of about 9MB and then download it in parallel 500 times. Each download will
* have its file contents compared to the original blob data. The test only cares about files that were properly
* downloaded, if a download fails with a network error it will be ignored. A requirement of 90% of files being
* successfully downloaded is also a requirement to prevent a case where most files failed to download and passing,
* hiding a true issue.
*/
@Test
@SuppressWarnings("unchecked")
private HttpClient getFaultInjectingWrappedHttpClient() {
switch (ENVIRONMENT.getHttpClientType()) {
case NETTY:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(NettyAsyncHttpClientProvider.class));
case OK_HTTP:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(OkHttpAsyncClientProvider.class));
case VERTX:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(VertxAsyncHttpClientProvider.class));
case JDK_HTTP:
try {
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider((Class<? extends HttpClientProvider>) Class.forName(
"com.azure.core.http.jdk.httpclient.JdkHttpClientProvider")));
} catch (ClassNotFoundException e) {
throw new IllegalStateException(e);
}
default:
throw new IllegalArgumentException("Unknown http client type: " + ENVIRONMENT.getHttpClientType());
}
}
private static final class HttpFaultInjectingHttpClient implements HttpClient {
private final HttpClient wrappedHttpClient;
HttpFaultInjectingHttpClient(HttpClient wrappedHttpClient) {
this.wrappedHttpClient = wrappedHttpClient;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
public Mono<HttpResponse> send(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
return wrappedHttpClient.send(request, context)
.map(response -> {
HttpRequest request1 = response.getRequest();
request1.getHeaders().remove(UPSTREAM_URI_HEADER);
request1.setUrl(originalUrl);
return response;
});
}
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
URL originalUrl = request.getUrl();
request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
String faultType = faultInjectorHandling();
request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
HttpResponse response = wrappedHttpClient.sendSync(request, context);
response.getRequest().setUrl(originalUrl);
response.getRequest().getHeaders().remove(UPSTREAM_URI_HEADER);
return response;
}
private static URL rewriteUrl(URL originalUrl) {
try {
return UrlBuilder.parse(originalUrl)
.setScheme("http")
.setHost("localhost")
.setPort(7777)
.toUrl();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
private static String faultInjectorHandling() {
double random = ThreadLocalRandom.current().nextDouble();
int choice = (int) (random * 100);
if (choice >= 25) {
return "f";
} else if (choice >= 1) {
if (random <= 0.34D) {
return "n";
} else if (random <= 0.67D) {
return "nc";
} else {
return "na";
}
} else {
if (random <= 0.25D) {
return "p";
} else if (random <= 0.50D) {
return "pc";
} else if (random <= 0.75D) {
return "pa";
} else {
return "pn";
}
}
}
}
private static boolean shouldRun() {
String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT);
return ENVIRONMENT.getTestMode() == TestMode.LIVE
&& !osName.contains("mac os")
&& !osName.contains("darwin");
}
} |
I would argue this should be logged as error | private void handleDiagnostics(Context context, CosmosDiagnosticsContext cosmosCtx) {
// Fan the diagnostics out to every configured handler. A failing handler must not
// break the operation or starve the remaining handlers, but a handler failure is a
// bug in the handler, so it is surfaced at error level rather than warn.
if (this.diagnosticHandlers != null && this.diagnosticHandlers.size() > 0) {
    for (CosmosDiagnosticsHandler handler: this.diagnosticHandlers) {
        try {
            handler.handleDiagnostics(cosmosCtx, context);
        } catch (Exception e) {
            LOGGER.error("HandledDiagnostics failed. ", e);
        }
    }
}
} | LOGGER.warn("HandledDiagnostics failed. ", e); | private void handleDiagnostics(Context context, CosmosDiagnosticsContext cosmosCtx) {
// Invoke each configured diagnostics handler, isolating failures so one misbehaving
// handler cannot affect the operation or the remaining handlers.
if (this.diagnosticHandlers != null && this.diagnosticHandlers.size() > 0) {
    for (CosmosDiagnosticsHandler handler: this.diagnosticHandlers) {
        try {
            handler.handleDiagnostics(cosmosCtx, context);
        } catch (Exception e) {
            // Handler bugs are surfaced at error level but intentionally swallowed.
            LOGGER.error("HandledDiagnostics failed. ", e);
        }
    }
}
} | class DiagnosticsProvider {
private static final ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private static final ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String LEGACY_DB_URL = "db.url";
public static final String LEGACY_DB_STATEMENT = "db.statement";
public final static String LEGACY_DB_INSTANCE = "db.instance";
private final static Duration FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD = Duration.ofMillis(5);
private static final String REACTOR_TRACING_CONTEXT_KEY = "tracing-context";
private static final String COSMOS_DIAGNOSTICS_CONTEXT_KEY = "azure-cosmos-context";
private static final Object DUMMY_VALUE = new Object();
private final Mono<Object> propagatingMono;
private final Flux<Object> propagatingFlux;
private final ArrayList<CosmosDiagnosticsHandler> diagnosticHandlers;
private final Tracer tracer;
private final CosmosTracer cosmosTracer;
private final CosmosClientTelemetryConfig telemetryConfig;
public DiagnosticsProvider(
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientId,
String userAgent,
ConnectionMode connectionMode) {
checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.telemetryConfig = clientTelemetryConfig;
this.diagnosticHandlers = new ArrayList<>(
clientTelemetryConfigAccessor.getDiagnosticHandlers(clientTelemetryConfig));
Tracer tracerCandidate = clientTelemetryConfigAccessor.getOrCreateTracer(clientTelemetryConfig);
if (tracerCandidate.isEnabled()) {
this.tracer = tracerCandidate;
} else {
if (!this.diagnosticHandlers.isEmpty()) {
this.tracer = EnabledNoOpTracer.INSTANCE;
} else {
this.tracer = tracerCandidate;
}
}
if (this.tracer.isEnabled()) {
if (clientTelemetryConfigAccessor.isLegacyTracingEnabled(clientTelemetryConfig)) {
this.cosmosTracer = new LegacyCosmosTracer(this.tracer);
} else {
this.cosmosTracer = new OpenTelemetryCosmosTracer(
this.tracer,
clientTelemetryConfig,
clientId,
userAgent,
connectionMode.name().toLowerCase(Locale.ROOT));
}
} else {
this.cosmosTracer = null;
}
this.propagatingMono = new PropagatingMono();
this.propagatingFlux = new PropagatingFlux();
}
public boolean isEnabled() {
return this.tracer.isEnabled();
}
public boolean isRealTracer() {
return this.tracer.isEnabled() && this.tracer != EnabledNoOpTracer.INSTANCE;
}
/**
 * Produces a short human-readable summary of the tracing configuration: the enabled
 * flag, the real-tracer flag, the tracer class name and, when any are configured, the
 * bracketed list of diagnostic handler class names.
 *
 * @return the configuration summary string.
 */
public String getTraceConfigLog() {
    StringBuilder summary = new StringBuilder()
        .append(this.isEnabled())
        .append(", ")
        .append(this.isRealTracer())
        .append(", ")
        .append(this.tracer.getClass().getCanonicalName());
    if (!this.diagnosticHandlers.isEmpty()) {
        summary.append(", [");
        String separator = "";
        for (CosmosDiagnosticsHandler handler : this.diagnosticHandlers) {
            summary.append(separator).append(handler.getClass().getCanonicalName());
            separator = ", ";
        }
        summary.append("]");
    }
    return summary.toString();
}
public CosmosClientTelemetryConfig getClientTelemetryConfig() {
return this.telemetryConfig;
}
/**
* Gets {@link Context} from Reactor {@link ContextView}.
*
* @param reactorContext Reactor context instance.
* @return {@link Context} from reactor context or null if not present.
*/
public static Context getContextFromReactorOrNull(ContextView reactorContext) {
    Object context = reactorContext.getOrDefault(REACTOR_TRACING_CONTEXT_KEY, null);
    // Only return the stored value when it really is an azure-core Context; an absent
    // key or a foreign type both map to null.
    if (context instanceof Context) {
        return (Context) context;
    }
    return null;
}
/**
* Stores {@link Context} in Reactor {@link reactor.util.context.Context}.
*
* @param traceContext {@link Context} context with trace context to store.
* @return {@link reactor.util.context.Context} Reactor context with trace context.
*/
public static reactor.util.context.Context setContextInReactor(Context traceContext) {
    // Wrap the azure-core Context in a single-entry Reactor context under the shared key
    // so getContextFromReactorOrNull can retrieve it downstream.
    return reactor.util.context.Context.of(REACTOR_TRACING_CONTEXT_KEY, traceContext);
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <br/>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(
String spanName,
CosmosDiagnosticsContext cosmosCtx,
Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
ctxAccessor.startOperation(cosmosCtx);
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
if (this.cosmosTracer == null) {
return local;
}
return this.cosmosTracer.startSpan(spanName, cosmosCtx, local);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T> void endSpan(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics
) {
try {
this.endSpanCore(signal, cosmosCtx, statusCode, actualItemCount, requestCharge, diagnostics);
} catch (Throwable error) {
this.handleErrors(error, 9901);
}
}
private <T> void endSpanCore(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics
) {
Objects.requireNonNull(signal, "'signal' cannot be null.");
Context context = getContextFromReactorOrNull(signal.getContextView());
if (context == null) {
return;
}
switch (signal.getType()) {
case ON_COMPLETE:
end(
cosmosCtx,
statusCode,
0,
actualItemCount,
requestCharge,
diagnostics,
null,
context,
ctxAccessor.isEmptyCompletion(cosmosCtx));
break;
case ON_NEXT:
end(
cosmosCtx,
statusCode,
0,
actualItemCount,
requestCharge,
diagnostics,
null,
context,
false);
break;
case ON_ERROR:
Throwable throwable = null;
int subStatusCode = 0;
Double effectiveRequestCharge = requestCharge;
CosmosDiagnostics effectiveDiagnostics = diagnostics;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
subStatusCode = exception.getSubStatusCode();
if (effectiveRequestCharge != null) {
effectiveRequestCharge += exception.getRequestCharge();
} else {
effectiveRequestCharge = exception.getRequestCharge();
}
effectiveDiagnostics = exception.getDiagnostics();
if (effectiveDiagnostics != null) {
diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(effectiveDiagnostics).set(true);
}
}
}
end(
cosmosCtx,
statusCode,
subStatusCode,
actualItemCount,
effectiveRequestCharge,
effectiveDiagnostics,
throwable,
context,
false);
break;
default:
break;
}
}
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, Throwable throwable) {
try {
int statusCode = DiagnosticsProvider.ERROR_CODE;
int subStatusCode = 0;
Double effectiveRequestCharge = null;
CosmosDiagnostics effectiveDiagnostics = null;
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
subStatusCode = exception.getSubStatusCode();
effectiveRequestCharge = exception.getRequestCharge();
effectiveDiagnostics = exception.getDiagnostics();
}
end(
cosmosCtx,
statusCode,
subStatusCode,
null,
effectiveRequestCharge,
effectiveDiagnostics,
throwable,
context,
false);
} catch (Throwable error) {
this.handleErrors(error, 9905);
}
}
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isForcedEmptyCompletion) {
try {
end(
cosmosCtx,
200,
0,
null,
null,
null,
null,
context,
isForcedEmptyCompletion);
} catch (Throwable error) {
this.handleErrors(error, 9904);
}
}
public void recordPage(
CosmosDiagnosticsContext cosmosCtx,
CosmosDiagnostics diagnostics,
Integer actualItemCount,
Double requestCharge
) {
try {
this.recordPageCore(cosmosCtx, diagnostics, actualItemCount, requestCharge);
} catch (Throwable error) {
this.handleErrors(error, 9902);
}
}
private void recordPageCore(
CosmosDiagnosticsContext cosmosCtx,
CosmosDiagnostics diagnostics,
Integer actualItemCount,
Double requestCharge
) {
ctxAccessor.recordOperation(
cosmosCtx, 200, 0, actualItemCount, requestCharge, diagnostics, null);
}
public <T> void recordFeedResponseConsumerLatency(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
Duration feedResponseConsumerLatency
) {
try {
Objects.requireNonNull(signal, "'signal' cannot be null.");
Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
checkArgument(
signal.getType() == SignalType.ON_COMPLETE || signal.getType() == SignalType.ON_ERROR,
"recordFeedResponseConsumerLatency should only be used for terminal signal");
Context context = getContextFromReactorOrNull(signal.getContextView());
if (context == null) {
return;
}
this.recordFeedResponseConsumerLatencyCore(
context, cosmosCtx, feedResponseConsumerLatency);
} catch (Throwable error) {
this.handleErrors(error, 9902);
}
}
/**
 * Records time spent in the application's feed-response consumer when it exceeds the
 * threshold: as a tracing event when a real tracer is wired up, otherwise as a warning
 * log. Below the threshold at most a debug trace is emitted.
 *
 * @param context trace context; may be null (falls back to the warning log).
 * @param cosmosCtx the diagnostics context; must not be null.
 * @param feedResponseConsumerLatency the measured consumer latency; must not be null.
 */
private void recordFeedResponseConsumerLatencyCore(
    Context context,
    CosmosDiagnosticsContext cosmosCtx,
    Duration feedResponseConsumerLatency
) {
    Objects.requireNonNull(cosmosCtx, "'cosmosCtx' cannot be null.");
    Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
    // Below (or at) the threshold nothing is recorded — only an optional debug trace.
    // The two duplicated threshold comparisons of the original are merged here.
    if (feedResponseConsumerLatency.compareTo(FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD) <= 0) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(
                "Total duration spent in FeedResponseConsumer is {} but does not exceed threshold of {}, Diagnostics: {}",
                feedResponseConsumerLatency,
                FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
                cosmosCtx);
        }
        return;
    }
    // Above the threshold: prefer a tracing event when a real tracer is available.
    // (The unused local 'Map<String, Object> attributes' of the original was removed.)
    if (context != null && this.isRealTracer()) {
        String trigger = "SlowFeedResponse";
        emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
        return;
    }
    // Fallback: surface the slow consumer via the warning log.
    LOGGER.warn(
        "Total duration spent in FeedResponseConsumer is {} and exceeds threshold of {}, Diagnostics: {}",
        feedResponseConsumerLatency,
        FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
        cosmosCtx);
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(
Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String containerId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
RequestOptions requestOptions) {
checkNotNull(client, "Argument 'client' must not be null.");
String accountName = clientAccessor.getAccountTagValue(client);
return publisherWithDiagnostics(
resultPublisher,
context,
spanName,
containerId,
databaseId,
accountName,
client,
consistencyLevel,
operationType,
resourceType,
null,
null,
(r) -> r.getStatusCode(),
(r) -> null,
(r) -> r.getRequestCharge(),
(r, samplingRate) -> {
CosmosDiagnostics diagnostics = r.getDiagnostics();
if (diagnostics != null) {
diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
}
return diagnostics;
},
requestOptions);
}
public <T extends CosmosBatchResponse> Mono<T> traceEnabledBatchResponsePublisher(
Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String containerId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
RequestOptions requestOptions) {
checkNotNull(client, "Argument 'client' must not be null.");
String accountName = clientAccessor.getAccountTagValue(client);
return publisherWithDiagnostics(
resultPublisher,
context,
spanName,
containerId,
databaseId,
accountName,
client,
consistencyLevel,
operationType,
resourceType,
null,
null,
CosmosBatchResponse::getStatusCode,
(r) -> null,
CosmosBatchResponse::getRequestCharge,
(r, samplingRate) -> {
CosmosDiagnostics diagnostics = r.getDiagnostics();
if (diagnostics != null) {
diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
}
return diagnostics;
},
requestOptions);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(
Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
RequestOptions requestOptions,
String trackingId) {
checkNotNull(requestOptions, "Argument 'requestOptions' must not be null.");
checkNotNull(client, "Argument 'client' must not be null.");
String accountName = clientAccessor.getAccountTagValue(client);
return publisherWithDiagnostics(
resultPublisher,
context,
spanName,
containerId,
databaseId,
accountName,
client,
consistencyLevel,
operationType,
resourceType,
trackingId,
null,
CosmosItemResponse::getStatusCode,
(r) -> null,
CosmosItemResponse::getRequestCharge,
(r, samplingRate) -> {
CosmosDiagnostics diagnostics = r.getDiagnostics();
if (diagnostics != null) {
diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
}
return diagnostics;
},
requestOptions);
}
/**
* Runs given {@code Flux<T>} publisher in the scope of trace context passed in using
* {@link DiagnosticsProvider
* Populates active trace context on Reactor's hot path. Reactor's instrumentation for OpenTelemetry
* (or other hypothetical solution) will take care of the cold path.
*
* @param publisher publisher to run.
* @return wrapped publisher.
*/
public <T> Flux<T> runUnderSpanInContext(Flux<T> publisher) {
return propagatingFlux.flatMap(ignored -> publisher);
}
public boolean shouldSampleOutOperation(CosmosPagedFluxOptions options) {
final double samplingRateSnapshot = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
options.setSamplingRateSnapshot(samplingRateSnapshot);
return shouldSampleOutOperation(samplingRateSnapshot);
}
// Decides whether this operation is dropped from diagnostics sampling. Rates of
// exactly 0 and 1 short-circuit deterministically; any other rate is a Bernoulli
// draw that keeps roughly 'samplingRate' of operations.
private boolean shouldSampleOutOperation(double samplingRate) {
    if (samplingRate == 0) {
        return true;
    }
    if (samplingRate == 1) {
        return false;
    }
    return !(ThreadLocalRandom.current().nextDouble() < samplingRate);
}
/**
 * Wraps {@code resultPublisher} so that a span is started on subscription and ended with
 * status/diagnostics extracted from the emitted value (or error). Returns the publisher
 * unchanged when tracing is disabled, the operation is sampled out, or this is a nested
 * SDK-internal call.
 *
 * @param cosmosCtx the diagnostics context of the operation (may be null).
 * @param resultPublisher the publisher producing the operation result.
 * @param context azure-core context carrying parent span / call-depth markers.
 * @param spanName the name of the span to create.
 * @param statusCodeFunc extracts the status code from the result.
 * @param actualItemCountFunc extracts the actual item count from the result (may return null).
 * @param requestChargeFunc extracts the request charge from the result.
 * @param diagnosticsFunc extracts the diagnostics from the result given the sampling rate.
 * @return the wrapped publisher.
 */
private <T> Mono<T> diagnosticsEnabledPublisher(
    CosmosDiagnosticsContext cosmosCtx,
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    Function<T, Integer> statusCodeFunc,
    Function<T, Integer> actualItemCountFunc,
    Function<T, Double> requestChargeFunc,
    BiFunction<T, Double, CosmosDiagnostics> diagnosticsFunc
) {
    // Tracing disabled entirely - pass the publisher through untouched.
    if (!isEnabled()) {
        return resultPublisher;
    }
    final double samplingRateSnapshot = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
    if (cosmosCtx != null) {
        ctxAccessor.setSamplingRateSnapshot(cosmosCtx, samplingRateSnapshot);
    }
    // Sampled out - the rate was still recorded on the ctx above, but no span is created.
    if (shouldSampleOutOperation(samplingRateSnapshot)) {
        return resultPublisher;
    }
    // Nested SDK-internal calls must not open a second span for the same logical operation.
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    if (isNestedCall) {
        return resultPublisher;
    }
    // Start the span eagerly (in contextWrite, which runs at subscription time) and end it
    // from the ON_NEXT / ON_ERROR signal.
    return propagatingMono
        .flatMap(ignored -> resultPublisher)
        .doOnEach(signal -> {
            switch (signal.getType()) {
                case ON_NEXT:
                    T response = signal.get();
                    this.endSpan(
                        signal,
                        cosmosCtx,
                        statusCodeFunc.apply(response),
                        actualItemCountFunc.apply(response),
                        requestChargeFunc.apply(response),
                        diagnosticsFunc.apply(response, samplingRateSnapshot));
                    break;
                case ON_ERROR:
                    // Error details are read from the signal itself; only a sentinel
                    // status code is passed here.
                    this.endSpan(
                        signal,
                        cosmosCtx,
                        ERROR_CODE,
                        null,
                        null,
                        null);
                    break;
                default:
                    break;
            }})
        .contextWrite(setContextInReactor(this.startSpan(spanName, cosmosCtx, context)));
}
/**
 * Creates the {@link CosmosDiagnosticsContext} for an operation, wires it into the request
 * options (so lower layers can attach diagnostics to it) and returns the result publisher
 * wrapped with span/diagnostics handling.
 *
 * @param resultPublisher the publisher producing the operation result.
 * @param context azure-core context carrying the parent trace span, if any.
 * @param spanName the name of the span to create.
 * @param containerId the container name.
 * @param databaseId the database name.
 * @param accountName the account tag value.
 * @param client the Cosmos async client executing the operation.
 * @param consistencyLevel the requested consistency level (may be null).
 * @param operationType the operation type.
 * @param resourceType the resource type.
 * @param trackingId the tracking id for idempotent retries, if any.
 * @param maxItemCount the max item count for query/feed operations, null otherwise.
 * @param statusCodeFunc extracts the status code from the result.
 * @param actualItemCountFunc extracts the actual item count from the result.
 * @param requestChargeFunc extracts the request charge from the result.
 * @param diagnosticFunc extracts the diagnostics from the result given the sampling rate.
 * @param requestOptions the request options (may be null).
 * @return the wrapped publisher.
 */
private <T> Mono<T> publisherWithDiagnostics(Mono<T> resultPublisher,
                                             Context context,
                                             String spanName,
                                             String containerId,
                                             String databaseId,
                                             String accountName,
                                             CosmosAsyncClient client,
                                             ConsistencyLevel consistencyLevel,
                                             OperationType operationType,
                                             ResourceType resourceType,
                                             String trackingId,
                                             Integer maxItemCount,
                                             Function<T, Integer> statusCodeFunc,
                                             Function<T, Integer> actualItemCountFunc,
                                             Function<T, Double> requestChargeFunc,
                                             BiFunction<T, Double, CosmosDiagnostics> diagnosticFunc,
                                             RequestOptions requestOptions) {

    // The accessor already handles a null request-level override, so there is no need to
    // duplicate the call on both branches of a ternary.
    CosmosDiagnosticsThresholds thresholds = clientAccessor.getEffectiveDiagnosticsThresholds(
        client,
        requestOptions != null ? requestOptions.getDiagnosticsThresholds() : null);

    CosmosDiagnosticsContext cosmosCtx = ctxAccessor.create(
        spanName,
        accountName,
        BridgeInternal.getServiceEndpoint(client),
        databaseId,
        containerId,
        resourceType,
        operationType,
        null,
        clientAccessor.getEffectiveConsistencyLevel(client, operationType, consistencyLevel),
        maxItemCount,
        thresholds,
        trackingId,
        clientAccessor.getConnectionMode(client),
        clientAccessor.getUserAgent(client),
        null);

    // Expose the ctx to lower layers so request-level diagnostics get attached to it.
    if (requestOptions != null) {
        requestOptions.setDiagnosticsContextSupplier(() -> cosmosCtx);
    }

    return diagnosticsEnabledPublisher(
        cosmosCtx,
        resultPublisher,
        context,
        spanName,
        statusCodeFunc,
        actualItemCountFunc,
        requestChargeFunc,
        diagnosticFunc);
}
/**
 * Completes the operation on the diagnostics context and - only when this call actually
 * transitioned it to completed (endOperation returns true) - dispatches diagnostics handlers
 * and ends the tracing span.
 *
 * @param cosmosCtx the diagnostics context - must not be null.
 * @param statusCode the final status code.
 * @param subStatusCode the final sub-status code.
 * @param actualItemCount the actual item count, if any.
 * @param requestCharge the total request charge, if any.
 * @param diagnostics the diagnostics of the final response, if any.
 * @param throwable the final error, if any.
 * @param context azure-core context carrying the span.
 * @param isForcedEmptyCompletion true when the operation completed without emitting a value
 * (e.g. cancellation) - handlers are skipped in that case but the span is still closed.
 */
private void end(
    CosmosDiagnosticsContext cosmosCtx,
    int statusCode,
    int subStatusCode,
    Integer actualItemCount,
    Double requestCharge,
    CosmosDiagnostics diagnostics,
    Throwable throwable,
    Context context,
    boolean isForcedEmptyCompletion) {
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
    // endOperation is idempotent - it returns true only for the first completion, which
    // guarantees handlers/span-end run at most once per operation.
    if (ctxAccessor.endOperation(
        cosmosCtx,
        statusCode,
        subStatusCode,
        actualItemCount,
        requestCharge,
        diagnostics,
        throwable)) {

        if (!isForcedEmptyCompletion) {
            this.handleDiagnostics(context, cosmosCtx);
        }

        if (this.cosmosTracer != null) {
            this.cosmosTracer.endSpan(cosmosCtx, context, isForcedEmptyCompletion);
        }
    }
}
/**
 * Emits the scalar DUMMY_VALUE subscription to the given subscriber, making the traced span
 * "current" while the downstream chain is being attached - but only when a trace context is
 * present in the subscriber's Reactor context.
 *
 * @param tracer the tracer used to make the span current.
 * @param actual the downstream subscriber.
 */
private static void subscribe(Tracer tracer, CoreSubscriber<? super Object> actual) {
    Context context = getContextFromReactorOrNull(actual.currentContext());
    if (context != null) {
        AutoCloseable scope = tracer.makeSpanCurrent(context);
        try {
            actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
        } finally {
            try {
                scope.close();
            } catch (Exception e) {
                // NOTE(review): throwing from this finally block masks any exception thrown
                // by onSubscribe above - confirm this is intentional before changing.
                LOGGER.error("Unexpected failure closing tracer scope.", e);
                throw new IllegalStateException("Unexpected failure closing tracer scope.", e);
            }
        }
    } else {
        // No trace context propagated - subscribe without making a span current.
        actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
    }
}
/**
* Helper class allowing running Mono subscription (and anything on the hot path)
* in scope of trace context. This enables OpenTelemetry auto-collection
* to pick it up and correlate lower levels of instrumentation and logs
* to logical Cosmos spans.
* <br/>
* OpenTelemetry reactor auto-instrumentation will take care of the cold path.
*/
private final class PropagatingMono extends Mono<Object> {
    @Override
    public void subscribe(CoreSubscriber<? super Object> actual) {
        // Delegate to the shared helper that makes the traced span current during subscription.
        DiagnosticsProvider.subscribe(tracer, actual);
    }
}
/**
* Helper class allowing running Flux subscription (and anything on the hot path)
* in scope of trace context. This enables OpenTelemetry auto-collection
* to pick it up and correlate lower levels of instrumentation and logs
* to logical Cosmos spans.
* <br/>
* OpenTelemetry reactor auto-instrumentation will take care of the cold path.
*/
private final class PropagatingFlux extends Flux<Object> {
    @Override
    public void subscribe(CoreSubscriber<? super Object> actual) {
        // Delegate to the shared helper that makes the traced span current during subscription.
        DiagnosticsProvider.subscribe(tracer, actual);
    }
}
/**
 * Internal abstraction over the two span-emission styles (legacy attribute names vs.
 * OpenTelemetry semantic conventions).
 */
private interface CosmosTracer {
    /**
     * Starts a tracing span for the operation and returns the context carrying the span.
     */
    Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context);

    /**
     * Ends the span previously started via startSpan, attaching status/diagnostics details.
     */
    void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion);
}
/**
 * CosmosTracer implementation emitting the legacy (pre-OpenTelemetry-convention) span
 * attributes (db.type/db.url/db.statement/db.instance) and detailed JSON diagnostics as
 * tracer events when thresholds are violated.
 */
private static final class LegacyCosmosTracer implements CosmosTracer {

    private final static String JSON_STRING = "JSON";

    private final Tracer tracer;

    public LegacyCosmosTracer(Tracer tracer) {
        checkNotNull(tracer, "Argument 'tracer' must not be null.");
        this.tracer = tracer;
    }

    @Override
    public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
        checkNotNull(spanName, "Argument 'spanName' must not be null.");
        checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
        StartSpanOptions spanOptions = this.startSpanOptions(
            spanName,
            cosmosCtx.getDatabaseName(),
            ctxAccessor.getEndpoint(cosmosCtx));
        return tracer.start(spanName, spanOptions, context);
    }

    // Builds the legacy CLIENT span options; db.instance is only set when a database is known.
    private StartSpanOptions startSpanOptions(String methodName, String databaseId, String endpoint) {
        StartSpanOptions spanOptions = new StartSpanOptions(SpanKind.CLIENT)
            .setAttribute(DB_TYPE, DB_TYPE_VALUE)
            .setAttribute(LEGACY_DB_URL, endpoint)
            .setAttribute(LEGACY_DB_STATEMENT, methodName);
        if (databaseId != null) {
            spanOptions.setAttribute(LEGACY_DB_INSTANCE, databaseId);
        }
        return spanOptions;
    }

    @Override
    public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
        // Only threshold-violating operations get their diagnostics attached as events;
        // serialization failures are logged but never fail the span end.
        try {
            if (cosmosCtx != null && cosmosCtx.isThresholdViolated()) {
                Collection<CosmosDiagnostics> diagnostics = cosmosCtx.getDiagnostics();
                if (diagnostics != null && diagnostics.size() > 0) {
                    for (CosmosDiagnostics d: diagnostics) {
                        addDiagnosticsOnTracerEvent(d, context);
                    }
                }
            }
        } catch (JsonProcessingException ex) {
            LOGGER.warn("Error while serializing diagnostics for tracer.", ex);
        }

        if (cosmosCtx != null) {
            tracer.end(cosmosCtx.getStatusCode(), cosmosCtx.getFinalError(), context);
        }
    }

    /**
     * Serializes the pieces of a ClientSideRequestStatistics instance (store responses,
     * supplemental responses, gateway statistics, retry context, address resolution,
     * serialization diagnostics, contacted regions, system info, client config) into
     * individual tracer events, timestamped at the best-known start time of each piece.
     */
    private void addClientSideRequestStatisticsOnTracerEvent(
        ClientSideRequestStatistics clientSideRequestStatistics,
        Context context) throws JsonProcessingException {
        if (clientSideRequestStatistics == null || context == null) {
            return;
        }

        Map<String, Object> attributes;
        int diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
            clientSideRequestStatistics.getResponseStatisticsList()) {

            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
            Iterator<RequestTimeline.Event> eventIterator = null;
            try {
                if (storeResponseStatistics.getStoreResult() != null) {
                    eventIterator = storeResponseStatistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline().iterator();
                }
            } catch (CosmosException ex) {
                eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
            }

            // Prefer the "created" timeline event's start time over the response time.
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
                , ZoneOffset.UTC);
            if (eventIterator != null) {
                while (eventIterator.hasNext()) {
                    RequestTimeline.Event event = eventIterator.next();
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }

            this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }

        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
            ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {

            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
                ZoneOffset.UTC);
            if (statistics.getStoreResult() != null) {
                for (RequestTimeline.Event event :
                    statistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline()) {
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }

            this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }

        for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : clientSideRequestStatistics.getGatewayStatisticsList()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(gatewayStats));
            OffsetDateTime requestStartTime =
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
            if (gatewayStats.getRequestTimeline() != null) {
                for (RequestTimeline.Event event : gatewayStats.getRequestTimeline()) {
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
        }

        if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING,
                mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
            this.addEvent("Retry Context", attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                    ZoneOffset.UTC), context);
        }

        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
            clientSideRequestStatistics.getAddressResolutionStatistics().values()) {

            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
            this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
                OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
        }

        if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
            for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
                clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {

                attributes = new HashMap<>();
                attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
                this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                    OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
            }
        }

        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getContactedRegionNames()));
        this.addEvent("RegionContacted", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);

        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
        this.addEvent("SystemInformation", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);

        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientConfig()));
        this.addEvent("ClientCfgs", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);

        // NOTE(review): the "Diagnostics for PKRange ..." events below reuse the attributes
        // map still holding the ClientCfgs JSON from the block above - confirm this is the
        // intended payload for these events.
        if (clientSideRequestStatistics.getResponseStatisticsList() != null && clientSideRequestStatistics.getResponseStatisticsList().size() > 0
            && clientSideRequestStatistics.getResponseStatisticsList().iterator().next() != null) {
            String eventName =
                "Diagnostics for PKRange "
                    + clientSideRequestStatistics.getResponseStatisticsList().iterator().next().getStoreResult().getStoreResponseDiagnostics().getPartitionKeyRangeId();
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        } else if (clientSideRequestStatistics.getGatewayStatisticsList() != null && clientSideRequestStatistics.getGatewayStatisticsList().size() > 0) {
            String eventName =
                "Diagnostics for PKRange " + clientSideRequestStatistics.getGatewayStatisticsList().get(0).getPartitionKeyRangeId();
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        } else {
            String eventName = "Diagnostics ";
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        }
    }

    /**
     * Serializes one CosmosDiagnostics instance into tracer events: query plan statistics
     * and per-PKRange query metrics (for feed responses) plus the client-side request
     * statistics of both the feed response and the point-operation path.
     */
    private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
        if (cosmosDiagnostics == null || context == null) {
            return;
        }

        Map<String, Object> attributes;
        FeedResponseDiagnostics feedResponseDiagnostics =
            diagnosticsAccessor.getFeedResponseDiagnostics(cosmosDiagnostics);
        if (feedResponseDiagnostics != null) {
            QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics = feedResponseDiagnostics
                .getQueryPlanDiagnosticsContext();

            if (queryPlanDiagnostics != null) {
                attributes = new HashMap<>();
                attributes.put("JSON",
                    mapper.writeValueAsString(queryPlanDiagnostics));
                this.addEvent(
                    "Query Plan Statistics",
                    attributes,
                    OffsetDateTime.ofInstant(queryPlanDiagnostics.getStartTimeUTC(), ZoneOffset.UTC),
                    context);
            }

            Map<String, QueryMetrics> queryMetrics = feedResponseDiagnostics.getQueryMetricsMap();
            if (queryMetrics != null && queryMetrics.size() > 0) {
                for(Map.Entry<String, QueryMetrics> entry : queryMetrics.entrySet()) {
                    attributes = new HashMap<>();
                    attributes.put("Query Metrics", entry.getValue().toString());
                    this.addEvent("Query Metrics for PKRange " + entry.getKey(), attributes,
                        OffsetDateTime.now(), context);
                }
            }

            for (ClientSideRequestStatistics c: feedResponseDiagnostics.getClientSideRequestStatistics()) {
                addClientSideRequestStatisticsOnTracerEvent(c, context);
            }
        }

        addClientSideRequestStatisticsOnTracerEvent(
            BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics),
            context);
    }

    // Thin wrapper so every event emission goes through one place.
    void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
        tracer.addEvent(name, attributes, timestamp, context);
    }
}
/**
 * Emits the serialized diagnostics context as one or more tracer events, splitting the JSON
 * into fragments no longer than the configured max trace message length. Each fragment event
 * carries the trigger and a zero-padded sequence number so consumers can reassemble it.
 *
 * @param tracer the tracer receiving the events.
 * @param cosmosCtx the diagnostics context to serialize.
 * @param trigger the reason the diagnostics are emitted (e.g. "Failure").
 * @param context azure-core context carrying the span.
 */
private static void emitDiagnosticsEvents(Tracer tracer, CosmosDiagnosticsContext cosmosCtx, String trigger, Context context) {
    String message = trigger + " - CTX: " + cosmosCtx.toJson();
    List<String> fragments = Splitter.fixedLength(Configs.getMaxTraceMessageLength()).splitToList(message);

    Map<String, Object> attributes = new HashMap<>();
    attributes.put("Trigger", trigger);

    int sequence = 0;
    for (String fragment : fragments) {
        // The attributes map is reused; only the sequence number changes per fragment.
        attributes.put("SequenceNumber", String.format(Locale.ROOT, "%05d", ++sequence));
        tracer.addEvent(fragment, attributes, OffsetDateTime.now(), context);
    }
}
/**
 * CosmosTracer implementation emitting spans following OpenTelemetry-style db.* attribute
 * naming, optionally with per-request (transport-level) events.
 */
private final static class OpenTelemetryCosmosTracer implements CosmosTracer {
    private final Tracer tracer;
    private final CosmosClientTelemetryConfig config;
    private final String clientId;
    private final String connectionMode;
    private final String userAgent;

    public OpenTelemetryCosmosTracer(
        Tracer tracer,
        CosmosClientTelemetryConfig config,
        String clientId,
        String userAgent,
        String connectionMode) {
        checkNotNull(tracer, "Argument 'tracer' must not be null.");
        checkNotNull(config, "Argument 'config' must not be null.");
        checkNotNull(clientId, "Argument 'clientId' must not be null.");
        checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
        checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
        this.tracer = tracer;
        this.config = config;
        this.clientId = clientId;
        this.userAgent = userAgent;
        this.connectionMode = connectionMode;
    }

    private boolean isTransportLevelTracingEnabled() {
        return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
    }

    @Override
    public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
        checkNotNull(spanName, "Argument 'spanName' must not be null.");
        checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
        // Stash the diagnostics context on the azure-core Context so endSpan can find it.
        Context local = Objects
            .requireNonNull(context, "'context' cannot be null.")
            .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);

        StartSpanOptions spanOptions = this.startSpanOptions(
            spanName,
            cosmosCtx);

        return tracer.start(spanName, spanOptions, local);
    }

    // Builds the INTERNAL span options; attribute population is skipped entirely for the
    // no-op tracer because the attributes would be dropped anyway.
    private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
        StartSpanOptions spanOptions;

        if (tracer instanceof EnabledNoOpTracer) {
            spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
        } else {
            spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
                .setAttribute("db.system", "cosmosdb")
                .setAttribute("db.operation", spanName)
                .setAttribute("net.peer.name", cosmosCtx.getAccountName())
                .setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
                .setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
                .setAttribute("db.name", cosmosCtx.getDatabaseName())
                .setAttribute("db.cosmosdb.client_id", this.clientId)
                .setAttribute("user_agent.original", this.userAgent)
                .setAttribute("db.cosmosdb.connection_mode", this.connectionMode);

            // Only attach the operation id when it adds information beyond the span name.
            if (!cosmosCtx.getOperationId().isEmpty() &&
                !cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {

                spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
            }

            String containerName = cosmosCtx.getContainerName();
            if (containerName != null) {
                spanOptions.setAttribute("db.cosmosdb.container", containerName);
            }
        }

        return spanOptions;
    }

    @Override
    public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
        if (cosmosCtx == null) {
            return;
        }

        if (!cosmosCtx.isCompleted()) {
            tracer.end("CosmosCtx not completed yet.", null, context);

            return;
        }

        String errorMessage = null;
        Throwable finalError = cosmosCtx.getFinalError();
        if (finalError != null && cosmosCtx.isFailure()) {
            if (finalError instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) finalError;
                errorMessage = cosmosException.getShortMessage();
            } else {
                errorMessage = finalError.getMessage();
            }
        }

        // No-op tracer: end immediately, attributes/events would be dropped anyway.
        if (tracer instanceof EnabledNoOpTracer) {
            tracer.end(errorMessage, finalError, context);

            return;
        }

        // Empty completion (no value emitted, e.g. cancellation): mark and end early.
        if (isEmptyCompletion) {
            tracer.setAttribute(
                "db.cosmosdb.is_empty_completion",
                Boolean.toString(true),
                context);

            tracer.end(errorMessage, finalError, context);

            return;
        }

        // Attach the full serialized diagnostics only for failures / threshold violations.
        if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
            String trigger;
            if (cosmosCtx.isFailure()) {
                trigger = "Failure";
            } else {
                trigger = "ThresholdViolation";
            }

            emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
        }

        if (finalError != null) {
            String exceptionType;
            if (finalError instanceof CosmosException) {
                exceptionType = CosmosException.class.getCanonicalName();
            } else {
                exceptionType = finalError.getClass().getCanonicalName();
            }

            tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
            tracer.setAttribute("exception.type", exceptionType, context);
            if (errorMessage != null) {
                tracer.setAttribute("exception.message", errorMessage, context);
            }
            tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
        }

        if (this.isTransportLevelTracingEnabled()) {
            traceTransportLevel(cosmosCtx, context);
        }

        tracer.setAttribute(
            "db.cosmosdb.status_code",
            Integer.toString(cosmosCtx.getStatusCode()),
            context);
        tracer.setAttribute(
            "db.cosmosdb.sub_status_code",
            Integer.toString(cosmosCtx.getSubStatusCode()),
            context);
        tracer.setAttribute(
            "db.cosmosdb.request_charge",
            Float.toString(cosmosCtx.getTotalRequestCharge()),
            context);
        tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
        tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
        tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);

        Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
        if (!regionsContacted.isEmpty()) {
            tracer.setAttribute(
                "db.cosmosdb.regions_contacted",
                String.join(", ", regionsContacted),
                context);
        }

        tracer.end(errorMessage, finalError, context);
    }

    /**
     * Emits one "rntbd.request" tracer event per store response, carrying address, LSN,
     * session token, status, latency and payload-size attributes.
     */
    private void recordStoreResponseStatistics(
        Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
        Context context) {

        for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
            StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
            StoreResponseDiagnostics storeResponseDiagnostics =
                storeResultDiagnostics.getStoreResponseDiagnostics();

            Map<String, Object> attributes = new HashMap<>();
            attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
            attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
            attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
            attributes.put("rntbd.region", responseStatistics.getRegionName());

            if (storeResultDiagnostics.getLsn() > 0) {
                attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
            }

            if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
                attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
            }

            // NOTE(review): both session-token attributes are populated from
            // getRequestSessionToken() - looks like a copy/paste issue; confirm whether
            // "rntbd.session_token" should come from a response-side getter instead.
            String responseSessionToken = responseStatistics.getRequestSessionToken();
            if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
                attributes.put("rntbd.session_token", responseSessionToken);
            }

            String requestSessionToken = responseStatistics.getRequestSessionToken();
            if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
                attributes.put("rntbd.request_session_token", requestSessionToken);
            }

            String activityId = storeResponseDiagnostics.getActivityId();
            if (activityId != null && !activityId.isEmpty()) {
                attributes.put("rntbd.activity_id", activityId);
            }

            String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
            if (pkRangeId != null && !pkRangeId.isEmpty()) {
                attributes.put("rntbd.partition_key_range_id", pkRangeId);
            }

            attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
            if (storeResponseDiagnostics.getSubStatusCode() != 0) {
                attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
            }

            if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
                attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
            }

            Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
            if (backendLatency != null) {
                attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
            }

            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            attributes.put("rntbd.request_charge", Double.toString(requestCharge));

            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                attributes.put("rntbd.latency", latency.toString());
            }

            if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
                attributes.put(
                    "rntbd.is_new_channel",
                    storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
            }

            // NOTE(review): despite its name, this loop selects the LATEST event time
            // (isBefore replaces the earlier candidate) - confirm whether the earliest
            // (e.g. the "created" event) was intended as the event timestamp.
            OffsetDateTime startTime = null;
            for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
                OffsetDateTime eventTime = event.getStartTime() != null ?
                    event.getStartTime().atOffset(ZoneOffset.UTC) : null;

                if (eventTime != null &&
                    (startTime == null || startTime.isBefore(eventTime))) {
                    startTime = eventTime;
                }

                Duration duration = event.getDuration();
                if (duration == null || duration == Duration.ZERO) {
                    continue;
                }

                attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
            }

            attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
            attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());

            this.tracer.addEvent(
                "rntbd.request",
                attributes,
                startTime != null ? startTime : OffsetDateTime.now(),
                context);
        }
    }

    // Emits events for both the primary and the supplemental store responses of each
    // request-statistics instance.
    private void traceTransportLevelRequests(
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
        Context context) {

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    requestStatistics.getResponseStatisticsList(),
                    context);
                recordStoreResponseStatistics(
                    requestStatistics.getSupplementalResponseStatisticsList(),
                    context);
            }
        }
    }

    // Entry point for transport-level tracing over the de-duplicated request statistics.
    private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
        Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
            ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);

        traceTransportLevelRequests(
            combinedClientSideRequestStatistics,
            context);
    }
}
/**
 * Renders the stack trace of the given throwable without the leading {@code e.toString()}
 * prefix (printStackTrace always starts its output with that string), so only the frame
 * lines remain.
 *
 * @param e the throwable whose stack trace is rendered - must not be null.
 * @return the stack trace text with the leading {@code e.toString()} stripped.
 */
public static String prettifyCallstack(Throwable e) {
    StringWriter stackWriter = new StringWriter();
    // try-with-resources flushes and closes the PrintWriter; StringWriter.close() is
    // documented as a no-op, so no separate close/IOException handling is needed.
    try (PrintWriter printWriter = new PrintWriter(stackWriter)) {
        e.printStackTrace(printWriter);
        printWriter.flush();
    }

    String prettifiedCallstack = stackWriter.toString();
    String message = e.toString();
    // printStackTrace's first line is exactly e.toString(); drop that prefix.
    if (prettifiedCallstack.length() > message.length()) {
        prettifiedCallstack = prettifiedCallstack.substring(message.length());
    }

    return prettifiedCallstack;
}
/**
 * Last-resort error handling for span completion: JVM {@link Error}s terminate the process
 * with the given exit code; any other throwable is logged and rethrown wrapped in a
 * {@link RuntimeException}.
 *
 * @param throwable the throwable to handle.
 * @param systemExitCode the process exit code used when {@code throwable} is an Error.
 */
private void handleErrors(Throwable throwable, int systemExitCode) {
    if (!(throwable instanceof Error)) {
        // Unexpected but recoverable: surface the failure to the caller.
        LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
        throw new RuntimeException(throwable);
    }

    // JVM Errors are treated as fatal: log, mirror to stderr, then terminate.
    LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. ", throwable);
    System.err.println("Unexpected error in DiagnosticsProvider.endSpan. " + throwable);
    System.exit(systemExitCode);
}
/**
 * A Tracer that reports itself as enabled (via the interface's default behavior) but performs
 * no work. Used when diagnostic handlers are registered without a real tracer, so the span
 * lifecycle still runs and drives handler dispatch.
 */
private static final class EnabledNoOpTracer implements Tracer {

    public static final Tracer INSTANCE = new EnabledNoOpTracer();

    // Singleton - use INSTANCE.
    private EnabledNoOpTracer() {
    }

    @Override
    public Context start(String methodName, Context context) {
        // No span is created; the incoming context flows through unchanged.
        return context;
    }

    @Override
    public Context start(String methodName, Context context, ProcessKind processKind) {
        return context;
    }

    @Override
    public void end(int responseCode, Throwable error, Context context) {
    }

    @Override
    public void end(String errorCondition, Throwable error, Context context) {
    }

    @Override
    public void setAttribute(String key, String value, Context context) {
    }

    @Override
    public Context setSpanName(String spanName, Context context) {
        return Context.NONE;
    }

    @Override
    public void addLink(Context context) {
    }

    @Override
    public Context extractContext(String diagnosticId, Context context) {
        return Context.NONE;
    }

    @Override
    public Context getSharedSpanBuilder(String spanName, Context context) {
        return Context.NONE;
    }
}
} | class DiagnosticsProvider {
private static final ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private static final ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String LEGACY_DB_URL = "db.url";
public static final String LEGACY_DB_STATEMENT = "db.statement";
public final static String LEGACY_DB_INSTANCE = "db.instance";
private final static Duration FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD = Duration.ofMillis(5);
private static final String REACTOR_TRACING_CONTEXT_KEY = "tracing-context";
private static final String COSMOS_DIAGNOSTICS_CONTEXT_KEY = "azure-cosmos-context";
private static final Object DUMMY_VALUE = new Object();
private final Mono<Object> propagatingMono;
private final Flux<Object> propagatingFlux;
private final ArrayList<CosmosDiagnosticsHandler> diagnosticHandlers;
private final Tracer tracer;
private final CosmosTracer cosmosTracer;
private final CosmosClientTelemetryConfig telemetryConfig;
private final boolean shouldSystemExitOnError;
public DiagnosticsProvider(
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientId,
String userAgent,
ConnectionMode connectionMode) {
checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.telemetryConfig = clientTelemetryConfig;
this.diagnosticHandlers = new ArrayList<>(
clientTelemetryConfigAccessor.getDiagnosticHandlers(clientTelemetryConfig));
Tracer tracerCandidate = clientTelemetryConfigAccessor.getOrCreateTracer(clientTelemetryConfig);
if (tracerCandidate.isEnabled()) {
this.tracer = tracerCandidate;
} else {
if (!this.diagnosticHandlers.isEmpty()) {
this.tracer = EnabledNoOpTracer.INSTANCE;
} else {
this.tracer = tracerCandidate;
}
}
if (this.tracer.isEnabled()) {
if (clientTelemetryConfigAccessor.isLegacyTracingEnabled(clientTelemetryConfig)) {
this.cosmosTracer = new LegacyCosmosTracer(this.tracer);
} else {
this.cosmosTracer = new OpenTelemetryCosmosTracer(
this.tracer,
clientTelemetryConfig,
clientId,
userAgent,
connectionMode.name().toLowerCase(Locale.ROOT));
}
} else {
this.cosmosTracer = null;
}
this.propagatingMono = new PropagatingMono();
this.propagatingFlux = new PropagatingFlux();
this.shouldSystemExitOnError = Configs.shouldDiagnosticsProviderSystemExitOnError();
}
public boolean isEnabled() {
return this.tracer.isEnabled();
}
public boolean isRealTracer() {
return this.tracer.isEnabled() && this.tracer != EnabledNoOpTracer.INSTANCE;
}
public String getTraceConfigLog() {
StringBuilder sb = new StringBuilder();
sb.append(this.isEnabled());
sb.append(", ");
sb.append(this.isRealTracer());
sb.append(", ");
sb.append(this.tracer.getClass().getCanonicalName());
if (!this.diagnosticHandlers.isEmpty()) {
sb.append(", [");
for (int i = 0; i < this.diagnosticHandlers.size(); i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(this.diagnosticHandlers.get(i).getClass().getCanonicalName());
}
sb.append("]");
}
return sb.toString();
}
/**
 * Returns the client telemetry configuration this provider was created with.
 *
 * @return the {@link CosmosClientTelemetryConfig} instance - never null (validated in the constructor).
 */
public CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return this.telemetryConfig;
}
/**
 * Extracts the azure-core {@link Context} stored under {@code REACTOR_TRACING_CONTEXT_KEY}
 * from a Reactor {@link ContextView}.
 *
 * @param reactorContext Reactor context instance.
 * @return the stored {@link Context}, or null when absent or of an unexpected type.
 */
public static Context getContextFromReactorOrNull(ContextView reactorContext) {
    final Object stored = reactorContext.getOrDefault(REACTOR_TRACING_CONTEXT_KEY, null);
    return (stored instanceof Context) ? (Context) stored : null;
}
/**
 * Stores {@link Context} in a Reactor {@link reactor.util.context.Context} under
 * {@code REACTOR_TRACING_CONTEXT_KEY} so it can later be retrieved via
 * {@link #getContextFromReactorOrNull(ContextView)}.
 *
 * @param traceContext {@link Context} context with trace context to store.
 * @return {@link reactor.util.context.Context} Reactor context with trace context.
 */
public static reactor.util.context.Context setContextInReactor(Context traceContext) {
    return reactor.util.context.Context.of(REACTOR_TRACING_CONTEXT_KEY, traceContext);
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <br/>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param spanName the name of the span to start.
 * @param cosmosCtx the diagnostics context the operation is recorded on - must not be null.
 * @param context Additional metadata that is passed through the call stack - must not be null.
 * @return An updated context object (with the diagnostics context attached, and - if a tracer is
 * configured - the started span).
 */
public Context startSpan(
    String spanName,
    CosmosDiagnosticsContext cosmosCtx,
    Context context) {
    checkNotNull(spanName, "Argument 'spanName' must not be null.");
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
    // Mark the operation as started on the diagnostics context before any tracer involvement.
    ctxAccessor.startOperation(cosmosCtx);
    Context local = Objects
        .requireNonNull(context, "'context' cannot be null.")
        .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
    if (this.cosmosTracer == null) {
        // Tracing disabled - still propagate the diagnostics context.
        return local;
    }
    return this.cosmosTracer.startSpan(spanName, cosmosCtx, local);
}
/**
 * Given a context containing the current tracing span the span is marked completed with status info from
 * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
 * Any failure while ending the span is routed through {@code handleErrors} instead of being propagated.
 *
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 * @param cosmosCtx the diagnostics context of the operation.
 * @param statusCode the status code to record (may be overridden by a CosmosException on error signals).
 * @param actualItemCount the number of items returned, or null when not applicable.
 * @param requestCharge the accumulated request charge, or null when not applicable.
 * @param diagnostics the diagnostics captured for the operation, or null.
 */
public <T> void endSpan(
    Signal<T> signal,
    CosmosDiagnosticsContext cosmosCtx,
    int statusCode,
    Integer actualItemCount,
    Double requestCharge,
    CosmosDiagnostics diagnostics
) {
    try {
        this.endSpanCore(signal, cosmosCtx, statusCode, actualItemCount, requestCharge, diagnostics);
    } catch (Throwable error) {
        // Diagnostics must never fail the actual operation - error code 9901 identifies this call site.
        this.handleErrors(error, 9901);
    }
}
/**
 * Ends the span depending on the signal type. For error signals carrying a {@link CosmosException}
 * the status code, sub-status code, request charge and diagnostics are taken from the exception
 * instead of the passed-in values.
 */
private <T> void endSpanCore(
    Signal<T> signal,
    CosmosDiagnosticsContext cosmosCtx,
    int statusCode,
    Integer actualItemCount,
    Double requestCharge,
    CosmosDiagnostics diagnostics
) {
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    Context context = getContextFromReactorOrNull(signal.getContextView());
    if (context == null) {
        // No trace context was propagated through Reactor - nothing to end.
        return;
    }
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(
                cosmosCtx,
                statusCode,
                0,
                actualItemCount,
                requestCharge,
                diagnostics,
                null,
                context,
                ctxAccessor.isEmptyCompletion(cosmosCtx));
            break;
        case ON_NEXT:
            end(
                cosmosCtx,
                statusCode,
                0,
                actualItemCount,
                requestCharge,
                diagnostics,
                null,
                context,
                false);
            break;
        case ON_ERROR:
            Throwable throwable = null;
            int subStatusCode = 0;
            Double effectiveRequestCharge = requestCharge;
            CosmosDiagnostics effectiveDiagnostics = diagnostics;
            if (signal.hasError()) {
                throwable = signal.getThrowable();
                if (throwable instanceof CosmosException) {
                    // Prefer status/charge/diagnostics from the CosmosException over the
                    // passed-in values; charges are added when both are present.
                    CosmosException exception = (CosmosException) throwable;
                    statusCode = exception.getStatusCode();
                    subStatusCode = exception.getSubStatusCode();
                    if (effectiveRequestCharge != null) {
                        effectiveRequestCharge += exception.getRequestCharge();
                    } else {
                        effectiveRequestCharge = exception.getRequestCharge();
                    }
                    effectiveDiagnostics = exception.getDiagnostics();
                    if (effectiveDiagnostics != null) {
                        // Flag the diagnostics as captured so the paged flux does not emit them twice.
                        diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(effectiveDiagnostics).set(true);
                    }
                }
            }
            end(
                cosmosCtx,
                statusCode,
                subStatusCode,
                actualItemCount,
                effectiveRequestCharge,
                effectiveDiagnostics,
                throwable,
                context,
                false);
            break;
        default:
            // Other signal types (e.g. ON_SUBSCRIBE) do not terminate the span.
            break;
    }
}
/**
 * Ends the span for a failed operation. When the throwable is a {@link CosmosException} the
 * status code, sub-status code, request charge and diagnostics are taken from it; otherwise a
 * generic error code is recorded. Failures while ending the span are routed through
 * {@code handleErrors} (call-site id 9905) instead of being propagated.
 *
 * @param cosmosCtx the diagnostics context of the operation.
 * @param context the trace context holding the current span.
 * @param throwable the failure that terminated the operation.
 */
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, Throwable throwable) {
    try {
        int statusCode = DiagnosticsProvider.ERROR_CODE;
        int subStatusCode = 0;
        Double effectiveRequestCharge = null;
        CosmosDiagnostics effectiveDiagnostics = null;
        if (throwable instanceof CosmosException) {
            CosmosException exception = (CosmosException) throwable;
            statusCode = exception.getStatusCode();
            subStatusCode = exception.getSubStatusCode();
            effectiveRequestCharge = exception.getRequestCharge();
            effectiveDiagnostics = exception.getDiagnostics();
        }
        end(
            cosmosCtx,
            statusCode,
            subStatusCode,
            null,
            effectiveRequestCharge,
            effectiveDiagnostics,
            throwable,
            context,
            false);
    } catch (Throwable error) {
        this.handleErrors(error, 9905);
    }
}
/**
 * Ends the span for a successful operation with status code 200. Failures while ending the span
 * are routed through {@code handleErrors} (call-site id 9904) instead of being propagated.
 *
 * @param cosmosCtx the diagnostics context of the operation.
 * @param context the trace context holding the current span.
 * @param isForcedEmptyCompletion when true the completion is treated as empty - diagnostics
 * handlers are skipped and the span is tagged accordingly.
 */
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isForcedEmptyCompletion) {
    try {
        end(
            cosmosCtx,
            200,
            0,
            null,
            null,
            null,
            null,
            context,
            isForcedEmptyCompletion);
    } catch (Throwable error) {
        this.handleErrors(error, 9904);
    }
}
/**
 * Records a single page of a paged operation (item count, request charge and diagnostics) on
 * the diagnostics context. Failures are routed through {@code handleErrors} (call-site id 9902).
 *
 * @param cosmosCtx the diagnostics context of the paged operation.
 * @param diagnostics the diagnostics captured for this page, or null.
 * @param actualItemCount the number of items in this page, or null.
 * @param requestCharge the request charge of this page, or null.
 */
public void recordPage(
    CosmosDiagnosticsContext cosmosCtx,
    CosmosDiagnostics diagnostics,
    Integer actualItemCount,
    Double requestCharge
) {
    try {
        this.recordPageCore(cosmosCtx, diagnostics, actualItemCount, requestCharge);
    } catch (Throwable error) {
        this.handleErrors(error, 9902);
    }
}
/**
 * Records a successful page (status code 200, sub-status 0) on the diagnostics context via the
 * context accessor.
 */
private void recordPageCore(
    CosmosDiagnosticsContext cosmosCtx,
    CosmosDiagnostics diagnostics,
    Integer actualItemCount,
    Double requestCharge
) {
    ctxAccessor.recordOperation(
        cosmosCtx, 200, 0, actualItemCount, requestCharge, diagnostics, null);
}
/**
 * Records the total time the application's feed-response consumer spent processing responses.
 * Only valid for terminal signals (ON_COMPLETE/ON_ERROR); skipped silently when no trace context
 * was propagated. Failures are routed through {@code handleErrors} (call-site id 9903).
 *
 * @param signal the terminal signal of the paged operation.
 * @param cosmosCtx the diagnostics context of the operation.
 * @param feedResponseConsumerLatency the accumulated consumer latency - must not be null.
 */
public <T> void recordFeedResponseConsumerLatency(
    Signal<T> signal,
    CosmosDiagnosticsContext cosmosCtx,
    Duration feedResponseConsumerLatency
) {
    try {
        Objects.requireNonNull(signal, "'signal' cannot be null.");
        Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
        checkArgument(
            signal.getType() == SignalType.ON_COMPLETE || signal.getType() == SignalType.ON_ERROR,
            "recordFeedResponseConsumerLatency should only be used for terminal signal");
        Context context = getContextFromReactorOrNull(signal.getContextView());
        if (context == null) {
            return;
        }
        this.recordFeedResponseConsumerLatencyCore(
            context, cosmosCtx, feedResponseConsumerLatency);
    } catch (Throwable error) {
        this.handleErrors(error, 9903);
    }
}
/**
 * Records the latency spent in the application's FeedResponse consumer. Latencies at or below
 * {@code FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD} are only debug-logged; latencies above the
 * threshold are emitted as tracer diagnostics events (when a real tracer is available and a
 * trace context exists) or logged as a warning otherwise.
 *
 * @param context trace context - may be null when no span was propagated.
 * @param cosmosCtx the diagnostics context - must not be null.
 * @param feedResponseConsumerLatency total time spent in the feed response consumer - must not be null.
 */
private void recordFeedResponseConsumerLatencyCore(
    Context context,
    CosmosDiagnosticsContext cosmosCtx,
    Duration feedResponseConsumerLatency
) {
    Objects.requireNonNull(cosmosCtx, "'cosmosCtx' cannot be null.");
    Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
    if (feedResponseConsumerLatency.compareTo(FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD) <= 0) {
        // Below (or at) the threshold the latency is only interesting for debugging.
        // The two separate threshold checks of the previous implementation are merged here.
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(
                "Total duration spent in FeedResponseConsumer is {} but does not exceed threshold of {}, Diagnostics: {}",
                feedResponseConsumerLatency,
                FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
                cosmosCtx);
        }
        return;
    }
    if (context != null && this.isRealTracer()) {
        // Emit the slow-consumer diagnostics as tracer events. (An unused HashMap allocation
        // that previously existed here was removed - it was never read or passed anywhere.)
        emitDiagnosticsEvents(tracer, cosmosCtx, "SlowFeedResponse", context);
        return;
    }
    LOGGER.warn(
        "Total duration spent in FeedResponseConsumer is {} and exceeds threshold of {}, Diagnostics: {}",
        feedResponseConsumerLatency,
        FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
        cosmosCtx);
}
/**
 * Wraps a {@link CosmosResponse} publisher with diagnostics/tracing instrumentation.
 * Status code and request charge are extracted from the response; the diagnostics snapshot is
 * stamped with the effective sampling rate.
 *
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String containerId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions) {
    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);
    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        null,
        null,
        (r) -> r.getStatusCode(),
        (r) -> null, // CosmosResponse carries no item count
        (r) -> r.getRequestCharge(),
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
/**
 * Wraps a {@link CosmosBatchResponse} publisher with diagnostics/tracing instrumentation.
 * Status code and request charge are extracted from the batch response; the diagnostics snapshot
 * is stamped with the effective sampling rate.
 *
 * @return the instrumented publisher.
 */
public <T extends CosmosBatchResponse> Mono<T> traceEnabledBatchResponsePublisher(
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String containerId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions) {
    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);
    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        null,
        null,
        CosmosBatchResponse::getStatusCode,
        (r) -> null, // batch responses carry no item count
        CosmosBatchResponse::getRequestCharge,
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
/**
 * Wraps a {@link CosmosItemResponse} publisher with diagnostics/tracing instrumentation.
 * Unlike the other trace-enabled publishers this one requires non-null request options and
 * supports an optional tracking id.
 *
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(
    Mono<CosmosItemResponse<T>> resultPublisher,
    Context context,
    String spanName,
    String containerId,
    String databaseId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions,
    String trackingId) {
    checkNotNull(requestOptions, "Argument 'requestOptions' must not be null.");
    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);
    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        trackingId,
        null,
        CosmosItemResponse::getStatusCode,
        (r) -> null, // point operations carry no item count
        CosmosItemResponse::getRequestCharge,
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
/**
 * Runs given {@code Flux<T>} publisher in the scope of the trace context passed in via
 * {@link DiagnosticsProvider#setContextInReactor(Context)}.
 * Populates active trace context on Reactor's hot path. Reactor's instrumentation for OpenTelemetry
 * (or other hypothetical solution) will take care of the cold path.
 *
 * @param publisher publisher to run.
 * @return wrapped publisher.
 */
public <T> Flux<T> runUnderSpanInContext(Flux<T> publisher) {
    return propagatingFlux.flatMap(ignored -> publisher);
}
/**
 * Decides whether a paged operation should be sampled out (not traced) based on the configured
 * sampling rate, and records the effective rate on the paged-flux options.
 *
 * @param options the paged flux options to stamp with the sampling rate snapshot.
 * @return true when the operation should NOT be traced.
 */
public boolean shouldSampleOutOperation(CosmosPagedFluxOptions options) {
    final double samplingRateSnapshot = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
    options.setSamplingRateSnapshot(samplingRateSnapshot);
    return shouldSampleOutOperation(samplingRateSnapshot);
}
/**
 * Decides whether an operation should be sampled out (not traced) for the given sampling rate.
 * A rate of 0 samples everything out, a rate of 1 samples nothing out; anything in between is
 * decided by a uniform random draw.
 *
 * @param samplingRate the effective sampling rate in [0, 1].
 * @return true when the operation should NOT be traced.
 */
private boolean shouldSampleOutOperation(double samplingRate) {
    // Fast paths for the common all-or-nothing rates (checks are mutually exclusive).
    if (samplingRate == 0) {
        return true;
    }
    if (samplingRate == 1) {
        return false;
    }
    return ThreadLocalRandom.current().nextDouble() >= samplingRate;
}
/**
 * Core instrumentation wrapper: starts a span before subscription, propagates it via the Reactor
 * context, and ends it on terminal/next signals. The publisher is returned unmodified when
 * tracing is disabled, the operation is sampled out, or this is a nested call (detected via
 * {@code COSMOS_CALL_DEPTH} in the context).
 */
private <T> Mono<T> diagnosticsEnabledPublisher(
    CosmosDiagnosticsContext cosmosCtx,
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    Function<T, Integer> statusCodeFunc,
    Function<T, Integer> actualItemCountFunc,
    Function<T, Double> requestChargeFunc,
    BiFunction<T, Double, CosmosDiagnostics> diagnosticsFunc
) {
    if (!isEnabled()) {
        return resultPublisher;
    }
    final double samplingRateSnapshot = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
    if (cosmosCtx != null) {
        // Record the effective sampling rate even when the operation ends up sampled out.
        ctxAccessor.setSamplingRateSnapshot(cosmosCtx, samplingRateSnapshot);
    }
    if (shouldSampleOutOperation(samplingRateSnapshot)) {
        return resultPublisher;
    }
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    if (isNestedCall) {
        // Only the outermost public API call gets a span - nested SDK calls are not re-instrumented.
        return resultPublisher;
    }
    return propagatingMono
        .flatMap(ignored -> resultPublisher)
        .doOnEach(signal -> {
            switch (signal.getType()) {
                case ON_NEXT:
                    T response = signal.get();
                    this.endSpan(
                        signal,
                        cosmosCtx,
                        statusCodeFunc.apply(response),
                        actualItemCountFunc.apply(response),
                        requestChargeFunc.apply(response),
                        diagnosticsFunc.apply(response, samplingRateSnapshot));
                    break;
                case ON_ERROR:
                    // endSpan extracts status/charge/diagnostics from the CosmosException if present.
                    this.endSpan(
                        signal,
                        cosmosCtx,
                        ERROR_CODE,
                        null,
                        null,
                        null);
                    break;
                default:
                    break;
            }})
        .contextWrite(setContextInReactor(this.startSpan(spanName, cosmosCtx, context)));
}
/**
 * Creates the {@link CosmosDiagnosticsContext} for an operation (using the effective thresholds
 * and consistency level resolved from the client/request options), exposes it to the request
 * options via a supplier, and delegates to {@link #diagnosticsEnabledPublisher}.
 */
private <T> Mono<T> publisherWithDiagnostics(Mono<T> resultPublisher,
                                            Context context,
                                            String spanName,
                                            String containerId,
                                            String databaseId,
                                            String accountName,
                                            CosmosAsyncClient client,
                                            ConsistencyLevel consistencyLevel,
                                            OperationType operationType,
                                            ResourceType resourceType,
                                            String trackingId,
                                            Integer maxItemCount,
                                            Function<T, Integer> statusCodeFunc,
                                            Function<T, Integer> actualItemCountFunc,
                                            Function<T, Double> requestChargeFunc,
                                            BiFunction<T, Double, CosmosDiagnostics> diagnosticFunc,
                                            RequestOptions requestOptions) {
    CosmosDiagnosticsThresholds thresholds = requestOptions != null
        ? clientAccessor.getEffectiveDiagnosticsThresholds(client, requestOptions.getDiagnosticsThresholds())
        : clientAccessor.getEffectiveDiagnosticsThresholds(client, null);
    CosmosDiagnosticsContext cosmosCtx = ctxAccessor.create(
        spanName,
        accountName,
        BridgeInternal.getServiceEndpoint(client),
        databaseId,
        containerId,
        resourceType,
        operationType,
        null,
        clientAccessor.getEffectiveConsistencyLevel(client, operationType, consistencyLevel),
        maxItemCount,
        thresholds,
        trackingId,
        clientAccessor.getConnectionMode(client),
        clientAccessor.getUserAgent(client),
        null);
    if (requestOptions != null) {
        // Expose the diagnostics context to lower layers through the request options.
        requestOptions.setDiagnosticsContextSupplier(() -> cosmosCtx);
    }
    return diagnosticsEnabledPublisher(
        cosmosCtx,
        resultPublisher,
        context,
        spanName,
        statusCodeFunc,
        actualItemCountFunc,
        requestChargeFunc,
        diagnosticFunc);
}
/**
 * Finalizes the operation on the diagnostics context and - when endOperation reports that this
 * call actually completed the operation - invokes the diagnostic handlers (unless this is a
 * forced empty completion) and ends the tracer span.
 */
private void end(
    CosmosDiagnosticsContext cosmosCtx,
    int statusCode,
    int subStatusCode,
    Integer actualItemCount,
    Double requestCharge,
    CosmosDiagnostics diagnostics,
    Throwable throwable,
    Context context,
    boolean isForcedEmptyCompletion) {
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
    if (ctxAccessor.endOperation(
        cosmosCtx,
        statusCode,
        subStatusCode,
        actualItemCount,
        requestCharge,
        diagnostics,
        throwable)) {
        if (!isForcedEmptyCompletion) {
            this.handleDiagnostics(context, cosmosCtx);
        }
        if (this.cosmosTracer != null) {
            this.cosmosTracer.endSpan(cosmosCtx, context, isForcedEmptyCompletion);
        }
    }
}
// Signals onSubscribe to the actual subscriber while making the span from the Reactor context
// current (when one exists), so hot-path instrumentation and logs correlate with the Cosmos span.
// The scope is always closed afterwards; a failure to close is logged and escalated because it
// would otherwise leak the ambient span.
private static void subscribe(Tracer tracer, CoreSubscriber<? super Object> actual) {
    Context context = getContextFromReactorOrNull(actual.currentContext());
    if (context != null) {
        AutoCloseable scope = tracer.makeSpanCurrent(context);
        try {
            actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
        } finally {
            try {
                scope.close();
            } catch (Exception e) {
                LOGGER.error("Unexpected failure closing tracer scope.", e);
                throw new IllegalStateException("Unexpected failure closing tracer scope.", e);
            }
        }
    } else {
        // No trace context - plain subscription without span scoping.
        actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
    }
}
/**
 * Helper class allowing running Mono subscription (and anything on the hot path)
 * in scope of trace context. This enables OpenTelemetry auto-collection
 * to pick it up and correlate lower levels of instrumentation and logs
 * to logical Cosmos spans.
 * <br/>
 * OpenTelemetry reactor auto-instrumentation will take care of the cold path.
 */
private final class PropagatingMono extends Mono<Object> {
    @Override
    public void subscribe(CoreSubscriber<? super Object> actual) {
        // Delegates to the shared helper which makes the span current while signaling onSubscribe.
        DiagnosticsProvider.subscribe(tracer, actual);
    }
}
/**
 * Helper class allowing running Flux subscription (and anything on the hot path)
 * in scope of trace context. This enables OpenTelemetry auto-collection
 * to pick it up and correlate lower levels of instrumentation and logs
 * to logical Cosmos spans.
 * <br/>
 * OpenTelemetry reactor auto-instrumentation will take care of the cold path.
 */
private final class PropagatingFlux extends Flux<Object> {
    @Override
    public void subscribe(CoreSubscriber<? super Object> actual) {
        // Delegates to the shared helper which makes the span current while signaling onSubscribe.
        DiagnosticsProvider.subscribe(tracer, actual);
    }
}
/**
 * Internal abstraction over the two tracing flavors - legacy span attributes vs.
 * OpenTelemetry-style semantic conventions.
 */
private interface CosmosTracer {
    // Starts a span for the given operation and returns the context carrying it.
    Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context);
    // Ends the span previously started for the given diagnostics context.
    void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion);
}
/**
 * Tracer flavor emitting the legacy (pre OpenTelemetry-convention) span attributes and
 * per-request diagnostics events serialized as JSON attributes.
 */
private static final class LegacyCosmosTracer implements CosmosTracer {
    // Attribute key under which serialized diagnostics payloads are attached to tracer events.
    private final static String JSON_STRING = "JSON";
    private final Tracer tracer;

    public LegacyCosmosTracer(Tracer tracer) {
        checkNotNull(tracer, "Argument 'tracer' must not be null.");
        this.tracer = tracer;
    }

    /**
     * Starts a CLIENT span carrying the legacy db.* attributes (db type, url, statement, instance).
     */
    @Override
    public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
        checkNotNull(spanName, "Argument 'spanName' must not be null.");
        checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
        StartSpanOptions spanOptions = this.startSpanOptions(
            spanName,
            cosmosCtx.getDatabaseName(),
            ctxAccessor.getEndpoint(cosmosCtx));
        return tracer.start(spanName, spanOptions, context);
    }

    // Builds the legacy span options; the database instance attribute is only set when known.
    private StartSpanOptions startSpanOptions(String methodName, String databaseId, String endpoint) {
        StartSpanOptions spanOptions = new StartSpanOptions(SpanKind.CLIENT)
            .setAttribute(DB_TYPE, DB_TYPE_VALUE)
            .setAttribute(LEGACY_DB_URL, endpoint)
            .setAttribute(LEGACY_DB_STATEMENT, methodName);
        if (databaseId != null) {
            spanOptions.setAttribute(LEGACY_DB_INSTANCE, databaseId);
        }
        return spanOptions;
    }

    /**
     * Ends the span; when a threshold was violated the captured diagnostics are first attached
     * as tracer events. Serialization failures are logged and otherwise ignored.
     */
    @Override
    public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
        try {
            if (cosmosCtx != null && cosmosCtx.isThresholdViolated()) {
                Collection<CosmosDiagnostics> diagnostics = cosmosCtx.getDiagnostics();
                if (diagnostics != null && diagnostics.size() > 0) {
                    for (CosmosDiagnostics d: diagnostics) {
                        addDiagnosticsOnTracerEvent(d, context);
                    }
                }
            }
        } catch (JsonProcessingException ex) {
            // Best-effort diagnostics - never fail span completion over serialization issues.
            LOGGER.warn("Error while serializing diagnostics for tracer.", ex);
        }
        if (cosmosCtx != null) {
            tracer.end(cosmosCtx.getStatusCode(), cosmosCtx.getFinalError(), context);
        }
    }

    /**
     * Attaches the various parts of the client-side request statistics (store responses,
     * supplemental responses, gateway statistics, retry context, address resolution,
     * serialization diagnostics, contacted regions, system information and client config)
     * as individual tracer events, each timestamped as close to its real start time as possible.
     */
    private void addClientSideRequestStatisticsOnTracerEvent(
        ClientSideRequestStatistics clientSideRequestStatistics,
        Context context) throws JsonProcessingException {
        if (clientSideRequestStatistics == null || context == null) {
            return;
        }
        Map<String, Object> attributes;
        int diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
            clientSideRequestStatistics.getResponseStatisticsList()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
            Iterator<RequestTimeline.Event> eventIterator = null;
            try {
                if (storeResponseStatistics.getStoreResult() != null) {
                    eventIterator = storeResponseStatistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline().iterator();
                }
            } catch (CosmosException ex) {
                // Failed responses expose their timeline through the exception instead.
                eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
            }
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
                , ZoneOffset.UTC);
            if (eventIterator != null) {
                // Prefer the 'created' timeline event as the event timestamp when available.
                while (eventIterator.hasNext()) {
                    RequestTimeline.Event event = eventIterator.next();
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }
        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
            ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
                ZoneOffset.UTC);
            if (statistics.getStoreResult() != null) {
                for (RequestTimeline.Event event :
                    statistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline()) {
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }
        for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : clientSideRequestStatistics.getGatewayStatisticsList()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(gatewayStats));
            OffsetDateTime requestStartTime =
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
            if (gatewayStats.getRequestTimeline() != null) {
                for (RequestTimeline.Event event : gatewayStats.getRequestTimeline()) {
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
        }
        if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING,
                mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
            this.addEvent("Retry Context", attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                    ZoneOffset.UTC), context);
        }
        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
            clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
            this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
                OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
        }
        if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
            for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
                clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
                attributes = new HashMap<>();
                attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
                this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                    OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
            }
        }
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getContactedRegionNames()));
        this.addEvent("RegionContacted", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
        this.addEvent("SystemInformation", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientConfig()));
        this.addEvent("ClientCfgs", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        // NOTE(review): the 'Diagnostics for PKRange'/'Diagnostics ' events below reuse the
        // 'attributes' map last populated with the ClientCfgs payload - confirm this is intentional.
        if (clientSideRequestStatistics.getResponseStatisticsList() != null && clientSideRequestStatistics.getResponseStatisticsList().size() > 0
            && clientSideRequestStatistics.getResponseStatisticsList().iterator().next() != null) {
            String eventName =
                "Diagnostics for PKRange "
                    + clientSideRequestStatistics.getResponseStatisticsList().iterator().next().getStoreResult().getStoreResponseDiagnostics().getPartitionKeyRangeId();
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        } else if (clientSideRequestStatistics.getGatewayStatisticsList() != null && clientSideRequestStatistics.getGatewayStatisticsList().size() > 0) {
            String eventName =
                "Diagnostics for PKRange " + clientSideRequestStatistics.getGatewayStatisticsList().get(0).getPartitionKeyRangeId();
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        } else {
            String eventName = "Diagnostics ";
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        }
    }

    /**
     * Attaches query-plan statistics, per-partition query metrics and all client-side request
     * statistics of the given diagnostics instance as tracer events.
     */
    private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
        if (cosmosDiagnostics == null || context == null) {
            return;
        }
        Map<String, Object> attributes;
        FeedResponseDiagnostics feedResponseDiagnostics =
            diagnosticsAccessor.getFeedResponseDiagnostics(cosmosDiagnostics);
        if (feedResponseDiagnostics != null) {
            QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics = feedResponseDiagnostics
                .getQueryPlanDiagnosticsContext();
            if (queryPlanDiagnostics != null) {
                attributes = new HashMap<>();
                attributes.put("JSON",
                    mapper.writeValueAsString(queryPlanDiagnostics));
                this.addEvent(
                    "Query Plan Statistics",
                    attributes,
                    OffsetDateTime.ofInstant(queryPlanDiagnostics.getStartTimeUTC(), ZoneOffset.UTC),
                    context);
            }
            Map<String, QueryMetrics> queryMetrics = feedResponseDiagnostics.getQueryMetricsMap();
            if (queryMetrics != null && queryMetrics.size() > 0) {
                for(Map.Entry<String, QueryMetrics> entry : queryMetrics.entrySet()) {
                    attributes = new HashMap<>();
                    attributes.put("Query Metrics", entry.getValue().toString());
                    this.addEvent("Query Metrics for PKRange " + entry.getKey(), attributes,
                        OffsetDateTime.now(), context);
                }
            }
            for (ClientSideRequestStatistics c: feedResponseDiagnostics.getClientSideRequestStatistics()) {
                addClientSideRequestStatisticsOnTracerEvent(c, context);
            }
        }
        // Point-operation diagnostics (non-feed) are attached via the single statistics instance.
        addClientSideRequestStatisticsOnTracerEvent(
            BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics),
            context);
    }

    // Thin wrapper around Tracer.addEvent - kept as a seam for testing.
    void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
        tracer.addEvent(name, attributes, timestamp, context);
    }
}
/**
 * Emits the serialized diagnostics context as one or more tracer events. Because tracer event
 * messages are length-limited, the payload is split into fixed-length fragments; each fragment
 * carries the trigger and a zero-padded sequence number attribute so consumers can reassemble it.
 */
private static void emitDiagnosticsEvents(Tracer tracer, CosmosDiagnosticsContext cosmosCtx, String trigger, Context context) {
    String payload = trigger + " - CTX: " + cosmosCtx.toJson();
    List<String> fragments = Splitter.fixedLength(Configs.getMaxTraceMessageLength()).splitToList(payload);
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("Trigger", trigger);
    int sequenceNumber = 0;
    for (String fragment : fragments) {
        sequenceNumber++;
        attributes.put("SequenceNumber", String.format(Locale.ROOT, "%05d", sequenceNumber));
        tracer.addEvent(fragment, attributes, OffsetDateTime.now(), context);
    }
}
private final static class OpenTelemetryCosmosTracer implements CosmosTracer {
private final Tracer tracer;
private final CosmosClientTelemetryConfig config;
private final String clientId;
private final String connectionMode;
private final String userAgent;
/**
 * Creates the OpenTelemetry-convention tracer flavor.
 *
 * @param tracer the underlying tracer - must not be null.
 * @param config the client telemetry configuration - must not be null.
 * @param clientId identifier of the owning client instance - must not be null.
 * @param userAgent the client's user agent string - must not be null.
 * @param connectionMode lower-cased connection mode name - must not be null.
 */
public OpenTelemetryCosmosTracer(
    Tracer tracer,
    CosmosClientTelemetryConfig config,
    String clientId,
    String userAgent,
    String connectionMode) {
    checkNotNull(tracer, "Argument 'tracer' must not be null.");
    checkNotNull(config, "Argument 'config' must not be null.");
    checkNotNull(clientId, "Argument 'clientId' must not be null.");
    checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
    checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
    this.tracer = tracer;
    this.config = config;
    this.clientId = clientId;
    this.userAgent = userAgent;
    this.connectionMode = connectionMode;
}
// Whether transport-level (per-request) tracing is enabled in the telemetry configuration.
private boolean isTransportLevelTracingEnabled() {
    return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
}
/**
 * Starts an INTERNAL span with OpenTelemetry-style attributes and attaches the diagnostics
 * context to the returned azure-core context.
 */
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
    checkNotNull(spanName, "Argument 'spanName' must not be null.");
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
    Context local = Objects
        .requireNonNull(context, "'context' cannot be null.")
        .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
    StartSpanOptions spanOptions = this.startSpanOptions(
        spanName,
        cosmosCtx);
    return tracer.start(spanName, spanOptions, local);
}
/**
 * Builds span options with OpenTelemetry-style db.* attributes. For the internal no-op tracer
 * the attributes are skipped entirely since they would never be exported.
 */
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
    StartSpanOptions spanOptions;
    if (tracer instanceof EnabledNoOpTracer) {
        spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
    } else {
        spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
            .setAttribute("db.system", "cosmosdb")
            .setAttribute("db.operation", spanName)
            .setAttribute("net.peer.name", cosmosCtx.getAccountName())
            .setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
            .setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
            .setAttribute("db.name", cosmosCtx.getDatabaseName())
            .setAttribute("db.cosmosdb.client_id", this.clientId)
            .setAttribute("user_agent.original", this.userAgent)
            .setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
        // Only record the operation id when it adds information beyond the span name.
        if (!cosmosCtx.getOperationId().isEmpty() &&
            !cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
            spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
        }
        String containerName = cosmosCtx.getContainerName();
        if (containerName != null) {
            spanOptions.setAttribute("db.cosmosdb.container", containerName);
        }
    }
    return spanOptions;
}
/**
 * Ends the span: sets error attributes when the operation failed, emits diagnostics events on
 * failure/threshold violation, optionally records transport-level traces, and stamps the final
 * status/charge/region attributes. Empty completions short-circuit with a dedicated attribute.
 */
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
    if (cosmosCtx == null) {
        return;
    }
    if (!cosmosCtx.isCompleted()) {
        // Defensive: should not happen - the operation must be completed before ending the span.
        tracer.end("CosmosCtx not completed yet.", null, context);
        return;
    }
    String errorMessage = null;
    Throwable finalError = cosmosCtx.getFinalError();
    if (finalError != null && cosmosCtx.isFailure()) {
        if (finalError instanceof CosmosException) {
            CosmosException cosmosException = (CosmosException) finalError;
            errorMessage = cosmosException.getShortMessage();
        } else {
            errorMessage = finalError.getMessage();
        }
    }
    if (tracer instanceof EnabledNoOpTracer) {
        // No exporter - skip all attribute/event work.
        tracer.end(errorMessage, finalError, context);
        return;
    }
    if (isEmptyCompletion) {
        tracer.setAttribute(
            "db.cosmosdb.is_empty_completion",
            Boolean.toString(true),
            context);
        tracer.end(errorMessage, finalError, context);
        return;
    }
    if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
        String trigger;
        if (cosmosCtx.isFailure()) {
            trigger = "Failure";
        } else {
            trigger = "ThresholdViolation";
        }
        emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
    }
    if (finalError != null) {
        String exceptionType;
        if (finalError instanceof CosmosException) {
            exceptionType = CosmosException.class.getCanonicalName();
        } else {
            exceptionType = finalError.getClass().getCanonicalName();
        }
        tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
        tracer.setAttribute("exception.type", exceptionType, context);
        if (errorMessage != null) {
            tracer.setAttribute("exception.message", errorMessage, context);
        }
        tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
    }
    if (this.isTransportLevelTracingEnabled()) {
        traceTransportLevel(cosmosCtx, context);
    }
    tracer.setAttribute(
        "db.cosmosdb.status_code",
        Integer.toString(cosmosCtx.getStatusCode()),
        context);
    tracer.setAttribute(
        "db.cosmosdb.sub_status_code",
        Integer.toString(cosmosCtx.getSubStatusCode()),
        context);
    tracer.setAttribute(
        "db.cosmosdb.request_charge",
        Float.toString(cosmosCtx.getTotalRequestCharge()),
        context);
    tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
    tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
    tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
    Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
    if (!regionsContacted.isEmpty()) {
        tracer.setAttribute(
            "db.cosmosdb.regions_contacted",
            String.join(", ", regionsContacted),
            context);
    }
    tracer.end(errorMessage, finalError, context);
}
// Emits one "rntbd.request" tracer event per store response, attaching transport-level
// diagnostics (replica URL, resource/operation type, region, LSNs, session tokens, status
// codes, per-phase latencies and payload sizes) as event attributes.
private void recordStoreResponseStatistics(
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
Context context) {
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Map<String, Object> attributes = new HashMap<>();
attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
attributes.put("rntbd.region", responseStatistics.getRegionName());
// LSN / global committed LSN are only attached when positive.
if (storeResultDiagnostics.getLsn() > 0) {
attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
}
if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
}
// NOTE(review): both rntbd.session_token and rntbd.request_session_token below are read
// from getRequestSessionToken() - this looks like a copy/paste slip; confirm whether the
// first assignment was meant to use a response-side session token getter.
String responseSessionToken = responseStatistics.getRequestSessionToken();
if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
attributes.put("rntbd.session_token", responseSessionToken);
}
String requestSessionToken = responseStatistics.getRequestSessionToken();
if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
attributes.put("rntbd.request_session_token", requestSessionToken);
}
String activityId = storeResponseDiagnostics.getActivityId();
if (activityId != null && !activityId.isEmpty()) {
attributes.put("rntbd.activity_id", activityId);
}
String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
if (pkRangeId != null && !pkRangeId.isEmpty()) {
attributes.put("rntbd.partition_key_range_id", pkRangeId);
}
attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
if (storeResponseDiagnostics.getSubStatusCode() != 0) {
attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
}
if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
}
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
}
double requestCharge = storeResponseDiagnostics.getRequestCharge();
attributes.put("rntbd.request_charge", Double.toString(requestCharge));
Duration latency = responseStatistics.getDuration();
if (latency != null) {
attributes.put("rntbd.latency", latency.toString());
}
if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
attributes.put(
"rntbd.is_new_channel",
storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
}
// Walk the request timeline; each phase with a non-zero duration becomes an
// rntbd.latency_<phase> attribute.
// NOTE(review): the isBefore comparison below keeps the LATEST event start time despite
// the variable being named "startTime" - confirm whether the earliest time was intended.
OffsetDateTime startTime = null;
for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
OffsetDateTime eventTime = event.getStartTime() != null ?
event.getStartTime().atOffset(ZoneOffset.UTC) : null;
if (eventTime != null &&
(startTime == null || startTime.isBefore(eventTime))) {
startTime = eventTime;
}
Duration duration = event.getDuration();
// NOTE(review): '==' is an identity comparison; this only skips events that return the
// Duration.ZERO constant instance - confirm duration.isZero() wasn't intended.
if (duration == null || duration == Duration.ZERO) {
continue;
}
attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
}
attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
this.tracer.addEvent(
"rntbd.request",
attributes,
startTime != null ? startTime : OffsetDateTime.now(),
context);
}
}
/**
 * Records transport-level tracer events for every client-side request statistics entry,
 * covering both the primary and the supplemental store response lists.
 */
private void traceTransportLevelRequests(
    Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
    Context context) {

    if (clientSideRequestStatistics == null) {
        return;
    }

    for (ClientSideRequestStatistics stats : clientSideRequestStatistics) {
        recordStoreResponseStatistics(stats.getResponseStatisticsList(), context);
        recordStoreResponseStatistics(stats.getSupplementalResponseStatisticsList(), context);
    }
}
// Pulls the de-duplicated, combined client-side request statistics off the diagnostics
// context and emits one tracer event per store response.
private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
    traceTransportLevelRequests(
        ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext),
        context);
}
}
/**
 * Returns the stack-frame portion of {@code e}'s printed stack trace, i.e. the full
 * {@code printStackTrace()} output with the leading {@code e.toString()} header stripped.
 *
 * @param e the throwable to render; must not be null.
 * @return the stack trace text minus the exception's {@code toString()} prefix.
 */
public static String prettifyCallstack(Throwable e) {
    StringWriter stackWriter = new StringWriter();
    // A PrintWriter over a StringWriter never performs real I/O; try-with-resources replaces
    // the previous manual flush/close ceremony (StringWriter.close() is documented as a
    // no-op, so the old IOException catch around it could never trigger meaningfully).
    try (PrintWriter printWriter = new PrintWriter(stackWriter)) {
        e.printStackTrace(printWriter);
    }
    String prettifiedCallstack = stackWriter.toString();
    String message = e.toString();
    // printStackTrace() output begins with e.toString(); strip it so only frames remain.
    if (prettifiedCallstack.length() > message.length()) {
        prettifiedCallstack = prettifiedCallstack.substring(message.length());
    }
    return prettifiedCallstack;
}
// Routes JVM Errors to the fatal-error path; any other Throwable is logged and rethrown
// wrapped in a RuntimeException.
private void handleErrors(Throwable throwable, int systemExitCode) {
    if (!(throwable instanceof Error)) {
        LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
        throw new RuntimeException(throwable);
    }
    handleFatalError((Error) throwable, systemExitCode);
}
// Handles a fatal JVM Error: if the configured mapper can translate it into an Exception,
// rethrow as a RuntimeException; otherwise either terminate the process (when configured)
// or rethrow the original Error.
private void handleFatalError(Error error, int systemExitCode) {
Exception exception = DiagnosticsProviderJvmFatalErrorMapper.getMapper().mapFatalError(error);
if (exception != null) {
String errorMessage = "Runtime exception mapped from fatal error " + error;
throw new RuntimeException(errorMessage, exception);
}
// No mapping available - honor shouldSystemExitOnError (presumably set at client
// construction; the field's initialization is not visible in this block).
if (this.shouldSystemExitOnError) {
// Logged AND printed to stderr because System.exit may beat async log flushing.
LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. Calling System.exit({})...", systemExitCode, error);
System.err.println(
String.format(
"Unexpected error in DiagnosticsProvider.endSpan. Calling System.exit(%d)... %s",
systemExitCode,
error)
);
System.exit(systemExitCode);
} else {
throw error;
}
}
// Tracer that performs no work but is still treated as "enabled" by this provider (see the
// constructor: it is installed when diagnostic handlers are registered but the real tracer
// is disabled), so span start/end bookkeeping keeps flowing to the handlers.
// NOTE(review): this class does not override isEnabled(); it relies on the Tracer
// interface's default - confirm that default returns true.
private static final class EnabledNoOpTracer implements Tracer {
// Shared singleton; the class is stateless.
public static final Tracer INSTANCE = new EnabledNoOpTracer();
private EnabledNoOpTracer() {
}
@Override
public Context start(String methodName, Context context) {
return context;
}
@Override
public Context start(String methodName, Context context, ProcessKind processKind) {
return context;
}
@Override
public void end(int responseCode, Throwable error, Context context) {
}
@Override
public void end(String errorCondition, Throwable error, Context context) {
}
@Override
public void setAttribute(String key, String value, Context context) {
}
@Override
public Context setSpanName(String spanName, Context context) {
return Context.NONE;
}
@Override
public void addLink(Context context) {
}
@Override
public Context extractContext(String diagnosticId, Context context) {
return Context.NONE;
}
@Override
public Context getSharedSpanBuilder(String spanName, Context context) {
return Context.NONE;
}
}
} |
updated | private void handleDiagnostics(Context context, CosmosDiagnosticsContext cosmosCtx) {
if (this.diagnosticHandlers != null && this.diagnosticHandlers.size() > 0) {
for (CosmosDiagnosticsHandler handler: this.diagnosticHandlers) {
try {
handler.handleDiagnostics(cosmosCtx, context);
} catch (Exception e) {
LOGGER.warn("HandledDiagnostics failed. ", e);
}
}
}
} | LOGGER.warn("HandledDiagnostics failed. ", e); | private void handleDiagnostics(Context context, CosmosDiagnosticsContext cosmosCtx) {
if (this.diagnosticHandlers != null && this.diagnosticHandlers.size() > 0) {
for (CosmosDiagnosticsHandler handler: this.diagnosticHandlers) {
try {
handler.handleDiagnostics(cosmosCtx, context);
} catch (Exception e) {
LOGGER.error("HandledDiagnostics failed. ", e);
}
}
}
} | class DiagnosticsProvider {
private static final ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private static final ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String LEGACY_DB_URL = "db.url";
public static final String LEGACY_DB_STATEMENT = "db.statement";
public final static String LEGACY_DB_INSTANCE = "db.instance";
private final static Duration FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD = Duration.ofMillis(5);
private static final String REACTOR_TRACING_CONTEXT_KEY = "tracing-context";
private static final String COSMOS_DIAGNOSTICS_CONTEXT_KEY = "azure-cosmos-context";
private static final Object DUMMY_VALUE = new Object();
private final Mono<Object> propagatingMono;
private final Flux<Object> propagatingFlux;
private final ArrayList<CosmosDiagnosticsHandler> diagnosticHandlers;
private final Tracer tracer;
private final CosmosTracer cosmosTracer;
private final CosmosClientTelemetryConfig telemetryConfig;
// Builds the provider: snapshots the diagnostic handlers from the telemetry config, picks
// the effective Tracer and, when tracing is active, the span-emission flavor (legacy
// attribute scheme vs OpenTelemetry-style).
public DiagnosticsProvider(
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientId,
String userAgent,
ConnectionMode connectionMode) {
checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.telemetryConfig = clientTelemetryConfig;
this.diagnosticHandlers = new ArrayList<>(
clientTelemetryConfigAccessor.getDiagnosticHandlers(clientTelemetryConfig));
Tracer tracerCandidate = clientTelemetryConfigAccessor.getOrCreateTracer(clientTelemetryConfig);
if (tracerCandidate.isEnabled()) {
this.tracer = tracerCandidate;
} else {
// Tracer disabled: still install an enabled no-op tracer when diagnostic handlers are
// registered so span bookkeeping (and handler invocation) keeps working.
if (!this.diagnosticHandlers.isEmpty()) {
this.tracer = EnabledNoOpTracer.INSTANCE;
} else {
this.tracer = tracerCandidate;
}
}
if (this.tracer.isEnabled()) {
if (clientTelemetryConfigAccessor.isLegacyTracingEnabled(clientTelemetryConfig)) {
this.cosmosTracer = new LegacyCosmosTracer(this.tracer);
} else {
this.cosmosTracer = new OpenTelemetryCosmosTracer(
this.tracer,
clientTelemetryConfig,
clientId,
userAgent,
connectionMode.name().toLowerCase(Locale.ROOT));
}
} else {
this.cosmosTracer = null;
}
this.propagatingMono = new PropagatingMono();
this.propagatingFlux = new PropagatingFlux();
}
/** @return true when any tracing/diagnostics pipeline is active for this provider. */
public boolean isEnabled() {
    return tracer.isEnabled();
}
/** @return true only when an actual (non-no-op) tracer implementation is plugged in. */
public boolean isRealTracer() {
    Tracer current = this.tracer;
    return current.isEnabled() && current != EnabledNoOpTracer.INSTANCE;
}
/**
 * Builds a human-readable summary of the tracing configuration: enabled flag, whether a
 * real tracer is active, the tracer class name and, when present, the list of registered
 * diagnostic handler class names.
 */
public String getTraceConfigLog() {
    StringBuilder summary = new StringBuilder()
        .append(this.isEnabled())
        .append(", ")
        .append(this.isRealTracer())
        .append(", ")
        .append(this.tracer.getClass().getCanonicalName());

    if (!this.diagnosticHandlers.isEmpty()) {
        summary.append(", [");
        String separator = "";
        for (CosmosDiagnosticsHandler handler : this.diagnosticHandlers) {
            summary.append(separator).append(handler.getClass().getCanonicalName());
            separator = ", ";
        }
        summary.append("]");
    }

    return summary.toString();
}
/** @return the telemetry configuration this provider was created with. */
public CosmosClientTelemetryConfig getClientTelemetryConfig() {
    return telemetryConfig;
}
/**
 * Extracts the azure-core {@link Context} stored in a Reactor {@link ContextView}, if any.
 *
 * @param reactorContext Reactor context instance.
 * @return the stored {@link Context}, or null when absent or of an unexpected type.
 */
public static Context getContextFromReactorOrNull(ContextView reactorContext) {
    final Object candidate = reactorContext.getOrDefault(REACTOR_TRACING_CONTEXT_KEY, null);
    return (candidate instanceof Context) ? (Context) candidate : null;
}
/**
 * Wraps the given trace {@link Context} into a Reactor context under the well-known
 * tracing key so downstream operators can retrieve it.
 *
 * @param traceContext {@link Context} context with trace context to store.
 * @return {@link reactor.util.context.Context} Reactor context with trace context.
 */
public static reactor.util.context.Context setContextInReactor(Context traceContext) {
    final reactor.util.context.Context reactorContext =
        reactor.util.context.Context.of(REACTOR_TRACING_CONTEXT_KEY, traceContext);
    return reactorContext;
}
/**
 * Starts a new operation span: begins the operation on the diagnostics context, attaches
 * the diagnostics context to the azure-core context, and (when a tracer flavor is active)
 * opens the tracer span. Downstream start calls will use the created span as the parent.
 *
 * @param spanName name of the span to open; must not be null.
 * @param cosmosCtx diagnostics context of the operation; must not be null.
 * @param context additional metadata passed through the call stack; must not be null.
 * @return the context enriched with the diagnostics context (and span data, if traced).
 */
public Context startSpan(
    String spanName,
    CosmosDiagnosticsContext cosmosCtx,
    Context context) {

    checkNotNull(spanName, "Argument 'spanName' must not be null.");
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");

    ctxAccessor.startOperation(cosmosCtx);

    Context enriched = Objects
        .requireNonNull(context, "'context' cannot be null.")
        .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);

    return (this.cosmosTracer != null)
        ? this.cosmosTracer.startSpan(spanName, cosmosCtx, enriched)
        : enriched;
}
/**
 * Given a context containing the current tracing span the span is marked completed with status info from
 * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
 *
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 */
public <T> void endSpan(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics
) {
try {
this.endSpanCore(signal, cosmosCtx, statusCode, actualItemCount, requestCharge, diagnostics);
} catch (Throwable error) {
// 9901 identifies this call-site if a fatal Error ends up forcing a System.exit.
this.handleErrors(error, 9901);
}
}
// Core span-completion logic: resolves the azure-core context from the Reactor signal and
// records the operation outcome according to the signal type. A missing context means the
// operation was never started under a span, so there is nothing to end.
private <T> void endSpanCore(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics
) {
Objects.requireNonNull(signal, "'signal' cannot be null.");
Context context = getContextFromReactorOrNull(signal.getContextView());
if (context == null) {
return;
}
switch (signal.getType()) {
case ON_COMPLETE:
// Completion without a value: mark as empty completion when the context says so.
end(
cosmosCtx,
statusCode,
0,
actualItemCount,
requestCharge,
diagnostics,
null,
context,
ctxAccessor.isEmptyCompletion(cosmosCtx));
break;
case ON_NEXT:
end(
cosmosCtx,
statusCode,
0,
actualItemCount,
requestCharge,
diagnostics,
null,
context,
false);
break;
case ON_ERROR:
Throwable throwable = null;
int subStatusCode = 0;
Double effectiveRequestCharge = requestCharge;
CosmosDiagnostics effectiveDiagnostics = diagnostics;
if (signal.hasError()) {
// CosmosException carries its own status/sub-status, request charge and
// diagnostics - prefer those over the caller-supplied values.
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
subStatusCode = exception.getSubStatusCode();
if (effectiveRequestCharge != null) {
effectiveRequestCharge += exception.getRequestCharge();
} else {
effectiveRequestCharge = exception.getRequestCharge();
}
effectiveDiagnostics = exception.getDiagnostics();
if (effectiveDiagnostics != null) {
// Flag the diagnostics as captured so the paged flux does not re-emit them.
diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(effectiveDiagnostics).set(true);
}
}
}
end(
cosmosCtx,
statusCode,
subStatusCode,
actualItemCount,
effectiveRequestCharge,
effectiveDiagnostics,
throwable,
context,
false);
break;
default:
break;
}
}
// Ends the span for a failed operation outside the Reactor signal flow. Status code,
// sub-status, request charge and diagnostics are extracted from the CosmosException when
// available; other throwables end the span with the generic ERROR_CODE.
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, Throwable throwable) {
try {
int statusCode = DiagnosticsProvider.ERROR_CODE;
int subStatusCode = 0;
Double effectiveRequestCharge = null;
CosmosDiagnostics effectiveDiagnostics = null;
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
subStatusCode = exception.getSubStatusCode();
effectiveRequestCharge = exception.getRequestCharge();
effectiveDiagnostics = exception.getDiagnostics();
}
end(
cosmosCtx,
statusCode,
subStatusCode,
null,
effectiveRequestCharge,
effectiveDiagnostics,
throwable,
context,
false);
} catch (Throwable error) {
// 9905 identifies this call-site if a fatal Error ends up forcing a System.exit.
this.handleErrors(error, 9905);
}
}
// Ends the span for a successful completion, optionally flagged as a forced empty
// completion. Hard-codes status 200 / sub-status 0 with no item count, charge, diagnostics
// or error.
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isForcedEmptyCompletion) {
try {
end(
cosmosCtx,
200,
0,
null,
null,
null,
null,
context,
isForcedEmptyCompletion);
} catch (Throwable error) {
// 9904 identifies this call-site if a fatal Error ends up forcing a System.exit.
this.handleErrors(error, 9904);
}
}
// Records a delivered feed/query page on the diagnostics context. Any thrown Error is
// routed through the fatal-error handling path (call-site id 9902).
public void recordPage(
CosmosDiagnosticsContext cosmosCtx,
CosmosDiagnostics diagnostics,
Integer actualItemCount,
Double requestCharge
) {
try {
this.recordPageCore(cosmosCtx, diagnostics, actualItemCount, requestCharge);
} catch (Throwable error) {
this.handleErrors(error, 9902);
}
}
// Pages are always recorded as status 200 / sub-status 0 with no error.
private void recordPageCore(
CosmosDiagnosticsContext cosmosCtx,
CosmosDiagnostics diagnostics,
Integer actualItemCount,
Double requestCharge
) {
ctxAccessor.recordOperation(
cosmosCtx, 200, 0, actualItemCount, requestCharge, diagnostics, null);
}
// Records how long user code spent consuming a feed response. Only valid for terminal
// signals; resolves the azure-core context from the Reactor signal and delegates to the
// core implementation. Errors go through fatal-error handling (call-site id 9902).
public <T> void recordFeedResponseConsumerLatency(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
Duration feedResponseConsumerLatency
) {
try {
Objects.requireNonNull(signal, "'signal' cannot be null.");
Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
checkArgument(
signal.getType() == SignalType.ON_COMPLETE || signal.getType() == SignalType.ON_ERROR,
"recordFeedResponseConsumerLatency should only be used for terminal signal");
Context context = getContextFromReactorOrNull(signal.getContextView());
if (context == null) {
// No span context - the operation was not traced; nothing to record against.
return;
}
this.recordFeedResponseConsumerLatencyCore(
context, cosmosCtx, feedResponseConsumerLatency);
} catch (Throwable error) {
this.handleErrors(error, 9902);
}
}
/**
 * Handles a measured feed-response-consumer latency: latencies at or below the threshold
 * are only debug-logged, while slow consumers either emit a "SlowFeedResponse" diagnostics
 * event on the active span or, without a real tracer/span, log a warning.
 *
 * @param context azure-core context of the operation (may be null).
 * @param cosmosCtx diagnostics context; must not be null.
 * @param feedResponseConsumerLatency total time spent in the feed response consumer; must not be null.
 */
private void recordFeedResponseConsumerLatencyCore(
    Context context,
    CosmosDiagnosticsContext cosmosCtx,
    Duration feedResponseConsumerLatency
) {
    Objects.requireNonNull(cosmosCtx, "'cosmosCtx' cannot be null.");
    Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");

    // Merged the two previous threshold comparisons into one branch: below/at threshold we
    // optionally debug-log and always bail out.
    if (feedResponseConsumerLatency.compareTo(FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD) <= 0) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(
                "Total duration spent in FeedResponseConsumer is {} but does not exceed threshold of {}, Diagnostics: {}",
                feedResponseConsumerLatency,
                FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
                cosmosCtx);
        }
        return;
    }

    if (context != null && this.isRealTracer()) {
        // NOTE: removed an unused local HashMap the previous implementation allocated here.
        emitDiagnosticsEvents(tracer, cosmosCtx, "SlowFeedResponse", context);
        return;
    }

    LOGGER.warn(
        "Total duration spent in FeedResponseConsumer is {} and exceeds threshold of {}, Diagnostics: {}",
        feedResponseConsumerLatency,
        FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
        cosmosCtx);
}
/**
 * Wraps a {@code Mono<CosmosResponse>} publisher with diagnostics/tracing instrumentation.
 * Status code, request charge and diagnostics are extracted from the emitted response; the
 * sampling-rate snapshot is stamped onto the diagnostics before recording.
 *
 * @return the instrumented publisher; subscribing starts the span, terminal signals end it.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String containerId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions) {

    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);

    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        null,
        null,
        // Method references for consistency with traceEnabledBatchResponsePublisher.
        CosmosResponse::getStatusCode,
        (r) -> null,
        CosmosResponse::getRequestCharge,
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
// Wraps a batch-response publisher with diagnostics/tracing instrumentation; status code,
// request charge and diagnostics are read off the emitted CosmosBatchResponse, and the
// sampling-rate snapshot is stamped onto the diagnostics before they are recorded.
public <T extends CosmosBatchResponse> Mono<T> traceEnabledBatchResponsePublisher(
Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String containerId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
RequestOptions requestOptions) {
checkNotNull(client, "Argument 'client' must not be null.");
String accountName = clientAccessor.getAccountTagValue(client);
return publisherWithDiagnostics(
resultPublisher,
context,
spanName,
containerId,
databaseId,
accountName,
client,
consistencyLevel,
operationType,
resourceType,
null,
null,
CosmosBatchResponse::getStatusCode,
(r) -> null,
CosmosBatchResponse::getRequestCharge,
(r, samplingRate) -> {
CosmosDiagnostics diagnostics = r.getDiagnostics();
if (diagnostics != null) {
diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
}
return diagnostics;
},
requestOptions);
}
// Wraps an item-response publisher with diagnostics/tracing instrumentation. Unlike the
// other trace-enabled variants this one requires non-null requestOptions and carries a
// trackingId through to the diagnostics context.
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(
Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
RequestOptions requestOptions,
String trackingId) {
checkNotNull(requestOptions, "Argument 'requestOptions' must not be null.");
checkNotNull(client, "Argument 'client' must not be null.");
String accountName = clientAccessor.getAccountTagValue(client);
return publisherWithDiagnostics(
resultPublisher,
context,
spanName,
containerId,
databaseId,
accountName,
client,
consistencyLevel,
operationType,
resourceType,
trackingId,
null,
CosmosItemResponse::getStatusCode,
(r) -> null,
CosmosItemResponse::getRequestCharge,
(r, samplingRate) -> {
CosmosDiagnostics diagnostics = r.getDiagnostics();
if (diagnostics != null) {
// Stamp the sampling-rate snapshot before the diagnostics are recorded.
diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
}
return diagnostics;
},
requestOptions);
}
/**
 * Runs given {@code Flux<T>} publisher in the scope of trace context passed in using
 * {@link DiagnosticsProvider#setContextInReactor(Context)}.
 * Populates active trace context on Reactor's hot path. Reactor's instrumentation for OpenTelemetry
 * (or other hypothetical solution) will take care of the cold path.
 *
 * @param publisher publisher to run.
 * @return wrapped publisher.
 */
public <T> Flux<T> runUnderSpanInContext(Flux<T> publisher) {
return propagatingFlux.flatMap(ignored -> publisher);
}
/**
 * Resolves the client-level diagnostics sampling rate, snapshots it on the paged-flux
 * options and decides whether this operation should be sampled out.
 */
public boolean shouldSampleOutOperation(CosmosPagedFluxOptions options) {
    final double samplingRate = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
    options.setSamplingRateSnapshot(samplingRate);
    return shouldSampleOutOperation(samplingRate);
}
// Decides whether to drop this operation from diagnostics sampling. Rates of exactly 1
// (keep everything) and exactly 0 (drop everything) are deterministic; any other rate
// draws a random number. Short-circuiting preserves the original evaluation order.
private boolean shouldSampleOutOperation(double samplingRate) {
    return samplingRate != 1
        && (samplingRate == 0 || ThreadLocalRandom.current().nextDouble() >= samplingRate);
}
// Central instrumentation wrapper: applies sampling, suppresses nested calls, and wires
// span start (at subscription) and span end (per signal) around the publisher.
private <T> Mono<T> diagnosticsEnabledPublisher(
CosmosDiagnosticsContext cosmosCtx,
Mono<T> resultPublisher,
Context context,
String spanName,
Function<T, Integer> statusCodeFunc,
Function<T, Integer> actualItemCountFunc,
Function<T, Double> requestChargeFunc,
BiFunction<T, Double, CosmosDiagnostics> diagnosticsFunc
) {
// Fast path: no tracing/diagnostics at all.
if (!isEnabled()) {
return resultPublisher;
}
final double samplingRateSnapshot = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
if (cosmosCtx != null) {
ctxAccessor.setSamplingRateSnapshot(cosmosCtx, samplingRateSnapshot);
}
if (shouldSampleOutOperation(samplingRateSnapshot)) {
return resultPublisher;
}
// Nested SDK calls (flagged via COSMOS_CALL_DEPTH) are not instrumented separately;
// only the outermost operation gets a span.
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
if (isNestedCall) {
return resultPublisher;
}
// contextWrite runs at subscription time and starts the span; doOnEach ends it on the
// first value or on error using the extractor functions supplied by the caller.
return propagatingMono
.flatMap(ignored -> resultPublisher)
.doOnEach(signal -> {
switch (signal.getType()) {
case ON_NEXT:
T response = signal.get();
this.endSpan(
signal,
cosmosCtx,
statusCodeFunc.apply(response),
actualItemCountFunc.apply(response),
requestChargeFunc.apply(response),
diagnosticsFunc.apply(response, samplingRateSnapshot));
break;
case ON_ERROR:
this.endSpan(
signal,
cosmosCtx,
ERROR_CODE,
null,
null,
null);
break;
default:
break;
}})
.contextWrite(setContextInReactor(this.startSpan(spanName, cosmosCtx, context)));
}
// Builds the CosmosDiagnosticsContext for an operation (resolving effective thresholds,
// consistency level, connection mode and user agent from the client), exposes it to the
// request options via a supplier, and hands off to diagnosticsEnabledPublisher.
private <T> Mono<T> publisherWithDiagnostics(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String accountName,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
String trackingId,
Integer maxItemCount,
Function<T, Integer> statusCodeFunc,
Function<T, Integer> actualItemCountFunc,
Function<T, Double> requestChargeFunc,
BiFunction<T, Double, CosmosDiagnostics> diagnosticFunc,
RequestOptions requestOptions) {
// Per-request thresholds (when provided) override the client-level defaults.
CosmosDiagnosticsThresholds thresholds = requestOptions != null
? clientAccessor.getEffectiveDiagnosticsThresholds(client, requestOptions.getDiagnosticsThresholds())
: clientAccessor.getEffectiveDiagnosticsThresholds(client, null);
CosmosDiagnosticsContext cosmosCtx = ctxAccessor.create(
spanName,
accountName,
BridgeInternal.getServiceEndpoint(client),
databaseId,
containerId,
resourceType,
operationType,
null,
clientAccessor.getEffectiveConsistencyLevel(client, operationType, consistencyLevel),
maxItemCount,
thresholds,
trackingId,
clientAccessor.getConnectionMode(client),
clientAccessor.getUserAgent(client),
null);
if (requestOptions != null) {
// Lets lower pipeline layers reach the diagnostics context lazily.
requestOptions.setDiagnosticsContextSupplier(() -> cosmosCtx);
}
return diagnosticsEnabledPublisher(
cosmosCtx,
resultPublisher,
context,
spanName,
statusCodeFunc,
actualItemCountFunc,
requestChargeFunc,
diagnosticFunc);
}
// Final completion path for an operation. The body only runs when endOperation returns
// true (presumably guarding against double-completion - not verifiable from this block):
// diagnostic handlers are invoked (skipped for forced empty completions) and the tracer
// span is closed.
private void end(
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
int subStatusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics,
Throwable throwable,
Context context,
boolean isForcedEmptyCompletion) {
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
if (ctxAccessor.endOperation(
cosmosCtx,
statusCode,
subStatusCode,
actualItemCount,
requestCharge,
diagnostics,
throwable)) {
if (!isForcedEmptyCompletion) {
// Forced empty completions skip the user-facing diagnostic handlers.
this.handleDiagnostics(context, cosmosCtx);
}
if (this.cosmosTracer != null) {
this.cosmosTracer.endSpan(cosmosCtx, context, isForcedEmptyCompletion);
}
}
}
// Subscribes the downstream subscriber while the span (if one is stored in the Reactor
// context) is made "current", so synchronous work triggered by onSubscribe runs inside
// the span scope. The scope is always closed afterwards.
private static void subscribe(Tracer tracer, CoreSubscriber<? super Object> actual) {
Context context = getContextFromReactorOrNull(actual.currentContext());
if (context != null) {
AutoCloseable scope = tracer.makeSpanCurrent(context);
try {
actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
} finally {
try {
scope.close();
} catch (Exception e) {
// A scope that cannot be closed would leak current-span state - fail loudly.
LOGGER.error("Unexpected failure closing tracer scope.", e);
throw new IllegalStateException("Unexpected failure closing tracer scope.", e);
}
}
} else {
// No trace context - plain subscription without span scoping.
actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
}
}
/**
 * Helper class allowing running Mono subscription (and anything on the hot path)
 * in scope of trace context. This enables OpenTelemetry auto-collection
 * to pick it up and correlate lower levels of instrumentation and logs
 * to logical Cosmos spans.
 * <br/>
 * OpenTelemetry reactor auto-instrumentation will take care of the cold path.
 */
private final class PropagatingMono extends Mono<Object> {
@Override
public void subscribe(CoreSubscriber<? super Object> actual) {
// Delegates to the shared helper that activates the span scope during onSubscribe.
DiagnosticsProvider.subscribe(tracer, actual);
}
}
/**
 * Helper class allowing running Flux subscription (and anything on the hot path)
 * in scope of trace context. This enables OpenTelemetry auto-collection
 * to pick it up and correlate lower levels of instrumentation and logs
 * to logical Cosmos spans.
 * <br/>
 * OpenTelemetry reactor auto-instrumentation will take care of the cold path.
 */
private final class PropagatingFlux extends Flux<Object> {
@Override
public void subscribe(CoreSubscriber<? super Object> actual) {
// Delegates to the shared helper that activates the span scope during onSubscribe.
DiagnosticsProvider.subscribe(tracer, actual);
}
}
// Internal abstraction over the two span-emission flavors (legacy attribute scheme vs
// OpenTelemetry-style), chosen in the DiagnosticsProvider constructor.
private interface CosmosTracer {
// Opens a span for the operation and returns the enriched context.
Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context);
// Completes the span; isEmptyCompletion marks terminations without any emitted result.
void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion);
}
private static final class LegacyCosmosTracer implements CosmosTracer {
private final static String JSON_STRING = "JSON";
private final Tracer tracer;
// Creates the legacy span-emission flavor wrapping the given azure-core tracer.
public LegacyCosmosTracer(Tracer tracer) {
checkNotNull(tracer, "Argument 'tracer' must not be null.");
this.tracer = tracer;
}
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
    checkNotNull(spanName, "Argument 'spanName' must not be null.");
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");

    // Legacy spans carry db.type/db.url/db.statement (and optionally db.instance) attributes.
    return tracer.start(
        spanName,
        this.startSpanOptions(spanName, cosmosCtx.getDatabaseName(), ctxAccessor.getEndpoint(cosmosCtx)),
        context);
}
// Builds the legacy span options: CLIENT span kind plus the pre-OTel db.* attributes.
// db.instance is only attached when a database id is known.
private StartSpanOptions startSpanOptions(String methodName, String databaseId, String endpoint) {
    StartSpanOptions options = new StartSpanOptions(SpanKind.CLIENT);
    options.setAttribute(DB_TYPE, DB_TYPE_VALUE);
    options.setAttribute(LEGACY_DB_URL, endpoint);
    options.setAttribute(LEGACY_DB_STATEMENT, methodName);
    if (databaseId != null) {
        options.setAttribute(LEGACY_DB_INSTANCE, databaseId);
    }
    return options;
}
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
// When diagnostic thresholds were violated, attach each CosmosDiagnostics payload as a
// tracer event before closing the span; serialization failures are logged, not rethrown,
// so the span is still ended below.
try {
if (cosmosCtx != null && cosmosCtx.isThresholdViolated()) {
Collection<CosmosDiagnostics> diagnostics = cosmosCtx.getDiagnostics();
if (diagnostics != null && diagnostics.size() > 0) {
for (CosmosDiagnostics d: diagnostics) {
addDiagnosticsOnTracerEvent(d, context);
}
}
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer.", ex);
}
if (cosmosCtx != null) {
// Legacy end-call reports the final status code and error (if any) on the span.
tracer.end(cosmosCtx.getStatusCode(), cosmosCtx.getFinalError(), context);
}
}
private void addClientSideRequestStatisticsOnTracerEvent(
ClientSideRequestStatistics clientSideRequestStatistics,
Context context) throws JsonProcessingException {
if (clientSideRequestStatistics == null || context == null) {
return;
}
Map<String, Object> attributes;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator = storeResponseStatistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline().iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals(CREATED.getEventName())) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
for (RequestTimeline.Event event :
statistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline()) {
if (event.getName().equals(CREATED.getEventName())) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : clientSideRequestStatistics.getGatewayStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(gatewayStats));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (gatewayStats.getRequestTimeline() != null) {
for (RequestTimeline.Event event : gatewayStats.getRequestTimeline()) {
if (event.getName().equals(CREATED.getEventName())) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getContactedRegionNames()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientConfig()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
if (clientSideRequestStatistics.getResponseStatisticsList() != null && clientSideRequestStatistics.getResponseStatisticsList().size() > 0
&& clientSideRequestStatistics.getResponseStatisticsList().iterator().next() != null) {
String eventName =
"Diagnostics for PKRange "
+ clientSideRequestStatistics.getResponseStatisticsList().iterator().next().getStoreResult().getStoreResponseDiagnostics().getPartitionKeyRangeId();
this.addEvent(eventName, attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
} else if (clientSideRequestStatistics.getGatewayStatisticsList() != null && clientSideRequestStatistics.getGatewayStatisticsList().size() > 0) {
String eventName =
"Diagnostics for PKRange " + clientSideRequestStatistics.getGatewayStatisticsList().get(0).getPartitionKeyRangeId();
this.addEvent(eventName, attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
} else {
String eventName = "Diagnostics ";
this.addEvent(eventName, attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null || context == null) {
return;
}
Map<String, Object> attributes;
FeedResponseDiagnostics feedResponseDiagnostics =
diagnosticsAccessor.getFeedResponseDiagnostics(cosmosDiagnostics);
if (feedResponseDiagnostics != null) {
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics = feedResponseDiagnostics
.getQueryPlanDiagnosticsContext();
if (queryPlanDiagnostics != null) {
attributes = new HashMap<>();
attributes.put("JSON",
mapper.writeValueAsString(queryPlanDiagnostics));
this.addEvent(
"Query Plan Statistics",
attributes,
OffsetDateTime.ofInstant(queryPlanDiagnostics.getStartTimeUTC(), ZoneOffset.UTC),
context);
}
Map<String, QueryMetrics> queryMetrics = feedResponseDiagnostics.getQueryMetricsMap();
if (queryMetrics != null && queryMetrics.size() > 0) {
for(Map.Entry<String, QueryMetrics> entry : queryMetrics.entrySet()) {
attributes = new HashMap<>();
attributes.put("Query Metrics", entry.getValue().toString());
this.addEvent("Query Metrics for PKRange " + entry.getKey(), attributes,
OffsetDateTime.now(), context);
}
}
for (ClientSideRequestStatistics c: feedResponseDiagnostics.getClientSideRequestStatistics()) {
addClientSideRequestStatisticsOnTracerEvent(c, context);
}
}
addClientSideRequestStatisticsOnTracerEvent(
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics),
context);
}
void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
}
/**
 * Emits the serialized diagnostics context as one or more tracer events.
 * The JSON payload is split into fixed-length fragments (tracer backends cap
 * attribute/message sizes) and each fragment is emitted as its own event,
 * carrying the trigger name and a zero-padded 1-based sequence number.
 *
 * @param tracer the tracer to emit events on; must not be null
 * @param cosmosCtx the diagnostics context whose JSON representation is emitted
 * @param trigger reason the diagnostics are being emitted (e.g. "Failure")
 * @param context the trace context identifying the current span
 */
private static void emitDiagnosticsEvents(Tracer tracer, CosmosDiagnosticsContext cosmosCtx, String trigger, Context context) {
    String message = trigger + " - CTX: " + cosmosCtx.toJson();
    List<String> messageFragments = Splitter.fixedLength(Configs.getMaxTraceMessageLength()).splitToList(message);
    for (int i = 0; i < messageFragments.size(); i++) {
        // Use a fresh attribute map per event: a tracer implementation may retain
        // the map by reference, and mutating a single shared map (as the previous
        // implementation did) would retroactively overwrite the SequenceNumber of
        // events that were already emitted.
        Map<String, Object> attributes = new HashMap<>();
        attributes.put("Trigger", trigger);
        attributes.put("SequenceNumber", String.format(Locale.ROOT,"%05d", i + 1));
        tracer.addEvent(messageFragments.get(i), attributes, OffsetDateTime.now(), context);
    }
}
// OpenTelemetry-convention tracer implementation: starts INTERNAL-kind spans tagged
// with the "db.system"/"db.cosmosdb.*" semantic attributes, records status/charge/
// retry attributes at span end, and can optionally emit per-request ("rntbd")
// transport-level events.
private final static class OpenTelemetryCosmosTracer implements CosmosTracer {
private final Tracer tracer;
private final CosmosClientTelemetryConfig config;
private final String clientId;
private final String connectionMode;
private final String userAgent;
public OpenTelemetryCosmosTracer(
Tracer tracer,
CosmosClientTelemetryConfig config,
String clientId,
String userAgent,
String connectionMode) {
checkNotNull(tracer, "Argument 'tracer' must not be null.");
checkNotNull(config, "Argument 'config' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.tracer = tracer;
this.config = config;
this.clientId = clientId;
this.userAgent = userAgent;
this.connectionMode = connectionMode;
}
private boolean isTransportLevelTracingEnabled() {
return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
}
// Starts the span after stashing the diagnostics context in the trace Context so
// downstream handlers can retrieve it by key.
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
StartSpanOptions spanOptions = this.startSpanOptions(
spanName,
cosmosCtx);
return tracer.start(spanName, spanOptions, local);
}
// Builds the span options. When the tracer is the enabled no-op, attribute
// computation is skipped entirely since the no-op drops them anyway.
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
StartSpanOptions spanOptions;
if (tracer instanceof EnabledNoOpTracer) {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
} else {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
.setAttribute("db.system", "cosmosdb")
.setAttribute("db.operation", spanName)
.setAttribute("net.peer.name", cosmosCtx.getAccountName())
.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
.setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
.setAttribute("db.name", cosmosCtx.getDatabaseName())
.setAttribute("db.cosmosdb.client_id", this.clientId)
.setAttribute("user_agent.original", this.userAgent)
.setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
// operation_id is only recorded when it differs from the span name, to avoid
// duplicating information already on the span.
if (!cosmosCtx.getOperationId().isEmpty() &&
!cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
}
String containerName = cosmosCtx.getContainerName();
if (containerName != null) {
spanOptions.setAttribute("db.cosmosdb.container", containerName);
}
}
return spanOptions;
}
// Ends the span. Order matters: incomplete-context short-circuit, error-message
// extraction, no-op/empty-completion short-circuits, diagnostics events on
// failure or threshold violation, exception attributes, optional transport-level
// events, then the status/charge/size/retry attributes and the actual end call.
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
if (cosmosCtx == null) {
return;
}
if (!cosmosCtx.isCompleted()) {
tracer.end("CosmosCtx not completed yet.", null, context);
return;
}
String errorMessage = null;
Throwable finalError = cosmosCtx.getFinalError();
if (finalError != null && cosmosCtx.isFailure()) {
if (finalError instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) finalError;
errorMessage = cosmosException.getShortMessage();
} else {
errorMessage = finalError.getMessage();
}
}
// No-op tracer: skip all attribute/event work, just end the span.
if (tracer instanceof EnabledNoOpTracer) {
tracer.end(errorMessage, finalError, context);
return;
}
if (isEmptyCompletion) {
tracer.setAttribute(
"db.cosmosdb.is_empty_completion",
Boolean.toString(true),
context);
tracer.end(errorMessage, finalError, context);
return;
}
if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
String trigger;
if (cosmosCtx.isFailure()) {
trigger = "Failure";
} else {
trigger = "ThresholdViolation";
}
emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
}
if (finalError != null) {
String exceptionType;
if (finalError instanceof CosmosException) {
exceptionType = CosmosException.class.getCanonicalName();
} else {
exceptionType = finalError.getClass().getCanonicalName();
}
// exception.escaped is false when the error was handled (ctx not a failure).
tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
tracer.setAttribute("exception.type", exceptionType, context);
if (errorMessage != null) {
tracer.setAttribute("exception.message", errorMessage, context);
}
tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
}
if (this.isTransportLevelTracingEnabled()) {
traceTransportLevel(cosmosCtx, context);
}
tracer.setAttribute(
"db.cosmosdb.status_code",
Integer.toString(cosmosCtx.getStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.sub_status_code",
Integer.toString(cosmosCtx.getSubStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.request_charge",
Float.toString(cosmosCtx.getTotalRequestCharge()),
context);
tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
if (!regionsContacted.isEmpty()) {
tracer.setAttribute(
"db.cosmosdb.regions_contacted",
String.join(", ", regionsContacted),
context);
}
tracer.end(errorMessage, finalError, context);
}
// Emits one "rntbd.request" event per store response, with transport-level
// attributes (endpoint, lsn, status, latencies per timeline phase, sizes, ...).
private void recordStoreResponseStatistics(
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
Context context) {
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Map<String, Object> attributes = new HashMap<>();
attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
attributes.put("rntbd.region", responseStatistics.getRegionName());
if (storeResultDiagnostics.getLsn() > 0) {
attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
}
if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
}
// NOTE(review): both "rntbd.session_token" and "rntbd.request_session_token" are
// populated from getRequestSessionToken(); the former presumably should come from
// a response-side session token accessor - confirm against the statistics API.
String responseSessionToken = responseStatistics.getRequestSessionToken();
if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
attributes.put("rntbd.session_token", responseSessionToken);
}
String requestSessionToken = responseStatistics.getRequestSessionToken();
if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
attributes.put("rntbd.request_session_token", requestSessionToken);
}
String activityId = storeResponseDiagnostics.getActivityId();
if (activityId != null && !activityId.isEmpty()) {
attributes.put("rntbd.activity_id", activityId);
}
String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
if (pkRangeId != null && !pkRangeId.isEmpty()) {
attributes.put("rntbd.partition_key_range_id", pkRangeId);
}
attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
if (storeResponseDiagnostics.getSubStatusCode() != 0) {
attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
}
if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
}
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
}
double requestCharge = storeResponseDiagnostics.getRequestCharge();
attributes.put("rntbd.request_charge", Double.toString(requestCharge));
Duration latency = responseStatistics.getDuration();
if (latency != null) {
attributes.put("rntbd.latency", latency.toString());
}
if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
attributes.put(
"rntbd.is_new_channel",
storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
}
OffsetDateTime startTime = null;
// NOTE(review): the condition below keeps the LATEST event time (startTime.isBefore),
// so 'startTime' ends up as the maximum timeline timestamp rather than the earliest -
// confirm that is the intended event timestamp.
for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
OffsetDateTime eventTime = event.getStartTime() != null ?
event.getStartTime().atOffset(ZoneOffset.UTC) : null;
if (eventTime != null &&
(startTime == null || startTime.isBefore(eventTime))) {
startTime = eventTime;
}
Duration duration = event.getDuration();
// NOTE(review): reference comparison against Duration.ZERO only filters values that
// are literally the shared constant - confirm the timeline uses Duration.ZERO for
// zero-length phases.
if (duration == null || duration == Duration.ZERO) {
continue;
}
attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
}
attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
this.tracer.addEvent(
"rntbd.request",
attributes,
startTime != null ? startTime : OffsetDateTime.now(),
context);
}
}
// Records transport events for both primary and supplemental store responses.
private void traceTransportLevelRequests(
Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
Context context) {
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
requestStatistics.getResponseStatisticsList(),
context);
recordStoreResponseStatistics(
requestStatistics.getSupplementalResponseStatisticsList(),
context);
}
}
}
// Entry point for transport-level tracing: de-duplicates the request statistics
// across the diagnostics context before emitting events.
private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);
traceTransportLevelRequests(
combinedClientSideRequestStatistics,
context);
}
}
/**
 * Renders the stack trace of {@code e} with the leading {@code e.toString()}
 * header removed, so the result contains only the line separator and the
 * frame list ("\tat ..." lines, including any "Caused by:" sections).
 *
 * @param e the throwable whose call stack should be rendered; must not be null
 * @return the printed stack trace with the exception's own toString() prefix stripped
 */
public static String prettifyCallstack(Throwable e) {
    StringWriter stackWriter = new StringWriter();
    // PrintWriter.close() also closes the underlying StringWriter (whose close()
    // is a no-op and cannot throw), so try-with-resources on the PrintWriter alone
    // replaces the previous manual close/flush/catch dance.
    try (PrintWriter printWriter = new PrintWriter(stackWriter)) {
        e.printStackTrace(printWriter);
        printWriter.flush();
    }
    String prettifiedCallstack = stackWriter.toString();
    String message = e.toString();
    // printStackTrace always begins with e.toString(); drop that duplicate header.
    if (prettifiedCallstack.length() > message.length()) {
        prettifiedCallstack = prettifiedCallstack.substring(message.length());
    }
    return prettifiedCallstack;
}
/**
 * Central failure handler for span-completion paths. JVM-level {@link Error}s
 * terminate the process with the supplied exit code (tracing must never mask a
 * fatal error); any other throwable is logged and rethrown wrapped in a
 * {@link RuntimeException}.
 *
 * @param throwable the failure that escaped span completion
 * @param systemExitCode process exit code used when the failure is an Error
 */
private void handleErrors(Throwable throwable, int systemExitCode) {
    if (!(throwable instanceof Error)) {
        LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
        throw new RuntimeException(throwable);
    }
    final String fatalMessage = "Unexpected error in DiagnosticsProvider.endSpan. ";
    LOGGER.error(fatalMessage, throwable);
    System.err.println(fatalMessage + throwable);
    System.exit(systemExitCode);
}
// A Tracer that is "enabled" but performs no tracing work. NOTE(review): presumably
// installed so span start/end bookkeeping (and with it diagnostic handlers) still
// runs when no real tracer is configured - confirm against the provider's wiring.
private static final class EnabledNoOpTracer implements Tracer {
// Stateless, so a single shared instance suffices.
public static final Tracer INSTANCE = new EnabledNoOpTracer();
private EnabledNoOpTracer() {
}
// start(..) overloads pass the caller's context through unchanged, so any data
// already attached (e.g. the cosmos diagnostics context) remains visible downstream.
@Override
public Context start(String methodName, Context context) {
return context;
}
@Override
public Context start(String methodName, Context context, ProcessKind processKind) {
return context;
}
// end/setAttribute/addLink are deliberate no-ops.
@Override
public void end(int responseCode, Throwable error, Context context) {
}
@Override
public void end(String errorCondition, Throwable error, Context context) {
}
@Override
public void setAttribute(String key, String value, Context context) {
}
// These return Context.NONE rather than the input - callers must not rely on
// data flowing through these operations.
@Override
public Context setSpanName(String spanName, Context context) {
return Context.NONE;
}
@Override
public void addLink(Context context) {
}
@Override
public Context extractContext(String diagnosticId, Context context) {
return Context.NONE;
}
@Override
public Context getSharedSpanBuilder(String spanName, Context context) {
return Context.NONE;
}
}
} | class DiagnosticsProvider {
private static final ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private static final ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String LEGACY_DB_URL = "db.url";
public static final String LEGACY_DB_STATEMENT = "db.statement";
public final static String LEGACY_DB_INSTANCE = "db.instance";
private final static Duration FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD = Duration.ofMillis(5);
private static final String REACTOR_TRACING_CONTEXT_KEY = "tracing-context";
private static final String COSMOS_DIAGNOSTICS_CONTEXT_KEY = "azure-cosmos-context";
private static final Object DUMMY_VALUE = new Object();
private final Mono<Object> propagatingMono;
private final Flux<Object> propagatingFlux;
private final ArrayList<CosmosDiagnosticsHandler> diagnosticHandlers;
private final Tracer tracer;
private final CosmosTracer cosmosTracer;
private final CosmosClientTelemetryConfig telemetryConfig;
private final boolean shouldSystemExitOnError;
public DiagnosticsProvider(
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientId,
String userAgent,
ConnectionMode connectionMode) {
checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.telemetryConfig = clientTelemetryConfig;
this.diagnosticHandlers = new ArrayList<>(
clientTelemetryConfigAccessor.getDiagnosticHandlers(clientTelemetryConfig));
Tracer tracerCandidate = clientTelemetryConfigAccessor.getOrCreateTracer(clientTelemetryConfig);
if (tracerCandidate.isEnabled()) {
this.tracer = tracerCandidate;
} else {
if (!this.diagnosticHandlers.isEmpty()) {
this.tracer = EnabledNoOpTracer.INSTANCE;
} else {
this.tracer = tracerCandidate;
}
}
if (this.tracer.isEnabled()) {
if (clientTelemetryConfigAccessor.isLegacyTracingEnabled(clientTelemetryConfig)) {
this.cosmosTracer = new LegacyCosmosTracer(this.tracer);
} else {
this.cosmosTracer = new OpenTelemetryCosmosTracer(
this.tracer,
clientTelemetryConfig,
clientId,
userAgent,
connectionMode.name().toLowerCase(Locale.ROOT));
}
} else {
this.cosmosTracer = null;
}
this.propagatingMono = new PropagatingMono();
this.propagatingFlux = new PropagatingFlux();
this.shouldSystemExitOnError = Configs.shouldDiagnosticsProviderSystemExitOnError();
}
public boolean isEnabled() {
return this.tracer.isEnabled();
}
public boolean isRealTracer() {
return this.tracer.isEnabled() && this.tracer != EnabledNoOpTracer.INSTANCE;
}
public String getTraceConfigLog() {
StringBuilder sb = new StringBuilder();
sb.append(this.isEnabled());
sb.append(", ");
sb.append(this.isRealTracer());
sb.append(", ");
sb.append(this.tracer.getClass().getCanonicalName());
if (!this.diagnosticHandlers.isEmpty()) {
sb.append(", [");
for (int i = 0; i < this.diagnosticHandlers.size(); i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(this.diagnosticHandlers.get(i).getClass().getCanonicalName());
}
sb.append("]");
}
return sb.toString();
}
public CosmosClientTelemetryConfig getClientTelemetryConfig() {
return this.telemetryConfig;
}
/**
* Gets {@link Context} from Reactor {@link ContextView}.
*
* @param reactorContext Reactor context instance.
* @return {@link Context} from reactor context or null if not present.
*/
public static Context getContextFromReactorOrNull(ContextView reactorContext) {
Object context = reactorContext.getOrDefault(REACTOR_TRACING_CONTEXT_KEY, null);
if (context instanceof Context) {
return (Context) context;
}
return null;
}
/**
* Stores {@link Context} in Reactor {@link reactor.util.context.Context}.
*
* @param traceContext {@link Context} context with trace context to store.
* @return {@link reactor.util.context.Context} Reactor context with trace context.
*/
public static reactor.util.context.Context setContextInReactor(Context traceContext) {
return reactor.util.context.Context.of(REACTOR_TRACING_CONTEXT_KEY, traceContext);
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <br/>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(
String spanName,
CosmosDiagnosticsContext cosmosCtx,
Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
ctxAccessor.startOperation(cosmosCtx);
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
if (this.cosmosTracer == null) {
return local;
}
return this.cosmosTracer.startSpan(spanName, cosmosCtx, local);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T> void endSpan(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics
) {
try {
this.endSpanCore(signal, cosmosCtx, statusCode, actualItemCount, requestCharge, diagnostics);
} catch (Throwable error) {
this.handleErrors(error, 9901);
}
}
private <T> void endSpanCore(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
int statusCode,
Integer actualItemCount,
Double requestCharge,
CosmosDiagnostics diagnostics
) {
Objects.requireNonNull(signal, "'signal' cannot be null.");
Context context = getContextFromReactorOrNull(signal.getContextView());
if (context == null) {
return;
}
switch (signal.getType()) {
case ON_COMPLETE:
end(
cosmosCtx,
statusCode,
0,
actualItemCount,
requestCharge,
diagnostics,
null,
context,
ctxAccessor.isEmptyCompletion(cosmosCtx));
break;
case ON_NEXT:
end(
cosmosCtx,
statusCode,
0,
actualItemCount,
requestCharge,
diagnostics,
null,
context,
false);
break;
case ON_ERROR:
Throwable throwable = null;
int subStatusCode = 0;
Double effectiveRequestCharge = requestCharge;
CosmosDiagnostics effectiveDiagnostics = diagnostics;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
subStatusCode = exception.getSubStatusCode();
if (effectiveRequestCharge != null) {
effectiveRequestCharge += exception.getRequestCharge();
} else {
effectiveRequestCharge = exception.getRequestCharge();
}
effectiveDiagnostics = exception.getDiagnostics();
if (effectiveDiagnostics != null) {
diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(effectiveDiagnostics).set(true);
}
}
}
end(
cosmosCtx,
statusCode,
subStatusCode,
actualItemCount,
effectiveRequestCharge,
effectiveDiagnostics,
throwable,
context,
false);
break;
default:
break;
}
}
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, Throwable throwable) {
try {
int statusCode = DiagnosticsProvider.ERROR_CODE;
int subStatusCode = 0;
Double effectiveRequestCharge = null;
CosmosDiagnostics effectiveDiagnostics = null;
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
subStatusCode = exception.getSubStatusCode();
effectiveRequestCharge = exception.getRequestCharge();
effectiveDiagnostics = exception.getDiagnostics();
}
end(
cosmosCtx,
statusCode,
subStatusCode,
null,
effectiveRequestCharge,
effectiveDiagnostics,
throwable,
context,
false);
} catch (Throwable error) {
this.handleErrors(error, 9905);
}
}
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isForcedEmptyCompletion) {
try {
end(
cosmosCtx,
200,
0,
null,
null,
null,
null,
context,
isForcedEmptyCompletion);
} catch (Throwable error) {
this.handleErrors(error, 9904);
}
}
public void recordPage(
CosmosDiagnosticsContext cosmosCtx,
CosmosDiagnostics diagnostics,
Integer actualItemCount,
Double requestCharge
) {
try {
this.recordPageCore(cosmosCtx, diagnostics, actualItemCount, requestCharge);
} catch (Throwable error) {
this.handleErrors(error, 9902);
}
}
private void recordPageCore(
CosmosDiagnosticsContext cosmosCtx,
CosmosDiagnostics diagnostics,
Integer actualItemCount,
Double requestCharge
) {
ctxAccessor.recordOperation(
cosmosCtx, 200, 0, actualItemCount, requestCharge, diagnostics, null);
}
public <T> void recordFeedResponseConsumerLatency(
Signal<T> signal,
CosmosDiagnosticsContext cosmosCtx,
Duration feedResponseConsumerLatency
) {
try {
Objects.requireNonNull(signal, "'signal' cannot be null.");
Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
checkArgument(
signal.getType() == SignalType.ON_COMPLETE || signal.getType() == SignalType.ON_ERROR,
"recordFeedResponseConsumerLatency should only be used for terminal signal");
Context context = getContextFromReactorOrNull(signal.getContextView());
if (context == null) {
return;
}
this.recordFeedResponseConsumerLatencyCore(
context, cosmosCtx, feedResponseConsumerLatency);
} catch (Throwable error) {
this.handleErrors(error, 9903);
}
}
private void recordFeedResponseConsumerLatencyCore(
Context context,
CosmosDiagnosticsContext cosmosCtx,
Duration feedResponseConsumerLatency
) {
Objects.requireNonNull(cosmosCtx, "'cosmosCtx' cannot be null.");
Objects.requireNonNull(feedResponseConsumerLatency, "'feedResponseConsumerLatency' cannot be null.");
if (feedResponseConsumerLatency.compareTo(FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD) <= 0 &&
!LOGGER.isDebugEnabled()) {
return;
}
if (feedResponseConsumerLatency.compareTo(FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD) <= 0 &&
LOGGER.isDebugEnabled()) {
LOGGER.debug(
"Total duration spent in FeedResponseConsumer is {} but does not exceed threshold of {}, Diagnostics: {}",
feedResponseConsumerLatency,
FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
cosmosCtx);
return;
}
if (context != null && this.isRealTracer()) {
Map<String, Object> attributes = new HashMap<>();
String trigger = "SlowFeedResponse";
emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
return;
}
LOGGER.warn(
"Total duration spent in FeedResponseConsumer is {} and exceeds threshold of {}, Diagnostics: {}",
feedResponseConsumerLatency,
FEED_RESPONSE_CONSUMER_LATENCY_THRESHOLD,
cosmosCtx);
}
/**
 * Instruments a {@code Mono<CosmosResponse>} publisher with span management and
 * diagnostics recording by delegating to {@code publisherWithDiagnostics}.
 *
 * @param resultPublisher the publisher producing the service response.
 * @param context the trace context.
 * @param spanName the span name for this operation.
 * @param databaseId the database id (may be null).
 * @param containerId the container id (may be null).
 * @param client the Cosmos client; must not be null.
 * @param consistencyLevel the requested consistency level.
 * @param operationType the operation type.
 * @param resourceType the resource type.
 * @param requestOptions per-request options (may be null).
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String containerId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions) {
    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);
    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        null,   // trackingId
        null,   // maxItemCount
        // Method references for consistency with traceEnabledBatchResponsePublisher.
        CosmosResponse::getStatusCode,
        (r) -> null,
        CosmosResponse::getRequestCharge,
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
/**
 * Instruments a {@code Mono<CosmosBatchResponse>} publisher with span management and
 * diagnostics recording by delegating to {@code publisherWithDiagnostics}.
 *
 * @param resultPublisher the publisher producing the batch response.
 * @param context the trace context.
 * @param spanName the span name for this operation.
 * @param databaseId the database id (may be null).
 * @param containerId the container id (may be null).
 * @param client the Cosmos client; must not be null.
 * @param consistencyLevel the requested consistency level.
 * @param operationType the operation type.
 * @param resourceType the resource type.
 * @param requestOptions per-request options (may be null).
 * @return the instrumented publisher.
 */
public <T extends CosmosBatchResponse> Mono<T> traceEnabledBatchResponsePublisher(
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    String databaseId,
    String containerId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions) {
    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);
    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        null,   // trackingId
        null,   // maxItemCount
        CosmosBatchResponse::getStatusCode,
        (r) -> null,
        CosmosBatchResponse::getRequestCharge,
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                // Propagate the sampled rate so the serialized diagnostics reflect it.
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
/**
 * Instruments a {@code Mono<CosmosItemResponse>} publisher with span management and
 * diagnostics recording by delegating to {@code publisherWithDiagnostics}.
 *
 * @param resultPublisher the publisher producing the item response.
 * @param context the trace context.
 * @param spanName the span name for this operation.
 * @param containerId the container id (may be null).
 * @param databaseId the database id (may be null).
 * @param client the Cosmos client; must not be null.
 * @param consistencyLevel the requested consistency level.
 * @param operationType the operation type.
 * @param resourceType the resource type.
 * @param requestOptions per-request options; must not be null (diagnostics context supplier is wired into it).
 * @param trackingId an optional id correlating retries of the same logical write.
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(
    Mono<CosmosItemResponse<T>> resultPublisher,
    Context context,
    String spanName,
    String containerId,
    String databaseId,
    CosmosAsyncClient client,
    ConsistencyLevel consistencyLevel,
    OperationType operationType,
    ResourceType resourceType,
    RequestOptions requestOptions,
    String trackingId) {
    checkNotNull(requestOptions, "Argument 'requestOptions' must not be null.");
    checkNotNull(client, "Argument 'client' must not be null.");
    String accountName = clientAccessor.getAccountTagValue(client);
    return publisherWithDiagnostics(
        resultPublisher,
        context,
        spanName,
        containerId,
        databaseId,
        accountName,
        client,
        consistencyLevel,
        operationType,
        resourceType,
        trackingId,
        null,   // maxItemCount
        CosmosItemResponse::getStatusCode,
        (r) -> null,
        CosmosItemResponse::getRequestCharge,
        (r, samplingRate) -> {
            CosmosDiagnostics diagnostics = r.getDiagnostics();
            if (diagnostics != null) {
                // Propagate the sampled rate so the serialized diagnostics reflect it.
                diagnosticsAccessor.setSamplingRateSnapshot(diagnostics, samplingRate);
            }
            return diagnostics;
        },
        requestOptions);
}
/**
 * Runs given {@code Flux<T>} publisher in the scope of trace context passed in using
 * {@link DiagnosticsProvider}.
 * Populates active trace context on Reactor's hot path. Reactor's instrumentation for OpenTelemetry
 * (or other hypothetical solution) will take care of the cold path.
 *
 * @param publisher publisher to run.
 * @return wrapped publisher.
 */
public <T> Flux<T> runUnderSpanInContext(Flux<T> publisher) {
    return propagatingFlux.flatMap(ignored -> publisher);
}
/**
 * Decides whether this paged operation should be sampled out based on the configured
 * telemetry sampling rate, and snapshots the rate onto the options for later use.
 *
 * @param options the paged flux options receiving the sampling-rate snapshot.
 * @return true when the operation should be excluded from telemetry.
 */
public boolean shouldSampleOutOperation(CosmosPagedFluxOptions options) {
    double samplingRate = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
    options.setSamplingRateSnapshot(samplingRate);
    return shouldSampleOutOperation(samplingRate);
}
/**
 * Returns true when the operation should be dropped from telemetry.
 * Rate 0 drops everything, rate 1 keeps everything; any other rate draws a
 * uniform random number and samples out when it falls at or above the rate.
 */
private boolean shouldSampleOutOperation(double samplingRate) {
    // Deterministic fast paths first.
    if (samplingRate == 0) {
        return true;
    }
    if (samplingRate == 1) {
        return false;
    }
    return ThreadLocalRandom.current().nextDouble() >= samplingRate;
}
/**
 * Wraps the given publisher with span start/end and diagnostics recording.
 * The publisher is returned unwrapped when diagnostics are disabled, when the
 * operation is sampled out, or when this is a nested call (the outer operation
 * already owns the span).
 *
 * @param cosmosCtx the diagnostics context (may be null; then no sampling snapshot is set).
 * @param resultPublisher the publisher producing the result.
 * @param context the trace context; must not be null (getData is called unconditionally).
 * @param spanName the span name.
 * @param statusCodeFunc extracts the status code from a successful result.
 * @param actualItemCountFunc extracts the item count from a successful result (may return null).
 * @param requestChargeFunc extracts the request charge from a successful result.
 * @param diagnosticsFunc extracts diagnostics from a successful result given the sampling rate.
 * @return the instrumented (or pass-through) publisher.
 */
private <T> Mono<T> diagnosticsEnabledPublisher(
    CosmosDiagnosticsContext cosmosCtx,
    Mono<T> resultPublisher,
    Context context,
    String spanName,
    Function<T, Integer> statusCodeFunc,
    Function<T, Integer> actualItemCountFunc,
    Function<T, Double> requestChargeFunc,
    BiFunction<T, Double, CosmosDiagnostics> diagnosticsFunc
) {
    if (!isEnabled()) {
        return resultPublisher;
    }
    final double samplingRateSnapshot = clientTelemetryConfigAccessor.getSamplingRate(this.telemetryConfig);
    if (cosmosCtx != null) {
        // Record the effective sampling rate even when the operation is later sampled out.
        ctxAccessor.setSamplingRateSnapshot(cosmosCtx, samplingRateSnapshot);
    }
    if (shouldSampleOutOperation(samplingRateSnapshot)) {
        return resultPublisher;
    }
    // Nested calls are already covered by the outer operation's span.
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    if (isNestedCall) {
        return resultPublisher;
    }
    // propagatingMono makes the span current on the hot (subscription) path; the span is
    // ended from the terminal/onNext signal and started via contextWrite (cold path).
    return propagatingMono
        .flatMap(ignored -> resultPublisher)
        .doOnEach(signal -> {
            switch (signal.getType()) {
                case ON_NEXT:
                    T response = signal.get();
                    this.endSpan(
                        signal,
                        cosmosCtx,
                        statusCodeFunc.apply(response),
                        actualItemCountFunc.apply(response),
                        requestChargeFunc.apply(response),
                        diagnosticsFunc.apply(response, samplingRateSnapshot));
                    break;
                case ON_ERROR:
                    // ERROR_CODE is used as the status; detailed error info comes from the signal.
                    this.endSpan(
                        signal,
                        cosmosCtx,
                        ERROR_CODE,
                        null,
                        null,
                        null);
                    break;
                default:
                    break;
            }})
        .contextWrite(setContextInReactor(this.startSpan(spanName, cosmosCtx, context)));
}
/**
 * Builds the CosmosDiagnosticsContext for one operation, wires it into the request
 * options (as a supplier), and hands off to {@code diagnosticsEnabledPublisher}.
 *
 * @param resultPublisher the publisher producing the result.
 * @param context the trace context.
 * @param spanName the span name for this operation.
 * @param containerId container id (may be null).
 * @param databaseId database id (may be null).
 * @param accountName the account tag value used as the peer name.
 * @param client the Cosmos client; used to resolve effective consistency/thresholds.
 * @param consistencyLevel requested consistency level (effective level is resolved per operation type).
 * @param operationType the operation type.
 * @param resourceType the resource type.
 * @param trackingId optional id correlating retries of the same logical write.
 * @param maxItemCount optional page-size hint recorded in the diagnostics context.
 * @param statusCodeFunc extracts the status code from a successful result.
 * @param actualItemCountFunc extracts the item count from a successful result.
 * @param requestChargeFunc extracts the request charge from a successful result.
 * @param diagnosticFunc extracts diagnostics from a successful result given the sampling rate.
 * @param requestOptions per-request options; may be null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithDiagnostics(Mono<T> resultPublisher,
                                             Context context,
                                             String spanName,
                                             String containerId,
                                             String databaseId,
                                             String accountName,
                                             CosmosAsyncClient client,
                                             ConsistencyLevel consistencyLevel,
                                             OperationType operationType,
                                             ResourceType resourceType,
                                             String trackingId,
                                             Integer maxItemCount,
                                             Function<T, Integer> statusCodeFunc,
                                             Function<T, Integer> actualItemCountFunc,
                                             Function<T, Double> requestChargeFunc,
                                             BiFunction<T, Double, CosmosDiagnostics> diagnosticFunc,
                                             RequestOptions requestOptions) {
    // Per-request threshold overrides (when present) win over client-level defaults.
    CosmosDiagnosticsThresholds thresholds = requestOptions != null
        ? clientAccessor.getEffectiveDiagnosticsThresholds(client, requestOptions.getDiagnosticsThresholds())
        : clientAccessor.getEffectiveDiagnosticsThresholds(client, null);
    CosmosDiagnosticsContext cosmosCtx = ctxAccessor.create(
        spanName,
        accountName,
        BridgeInternal.getServiceEndpoint(client),
        databaseId,
        containerId,
        resourceType,
        operationType,
        null,
        clientAccessor.getEffectiveConsistencyLevel(client, operationType, consistencyLevel),
        maxItemCount,
        thresholds,
        trackingId,
        clientAccessor.getConnectionMode(client),
        clientAccessor.getUserAgent(client),
        null);
    if (requestOptions != null) {
        // Lower layers pull the diagnostics context lazily from the request options.
        requestOptions.setDiagnosticsContextSupplier(() -> cosmosCtx);
    }
    return diagnosticsEnabledPublisher(
        cosmosCtx,
        resultPublisher,
        context,
        spanName,
        statusCodeFunc,
        actualItemCountFunc,
        requestChargeFunc,
        diagnosticFunc);
}
/**
 * Completes an operation: marks the diagnostics context ended and, if this was the
 * first/effective completion (endOperation returned true), publishes diagnostics and
 * ends the tracer span.
 *
 * @param cosmosCtx the diagnostics context; must not be null.
 * @param statusCode terminal status code of the operation.
 * @param subStatusCode terminal sub-status code.
 * @param actualItemCount item count (may be null).
 * @param requestCharge request charge (may be null).
 * @param diagnostics diagnostics payload (may be null, e.g. on error paths).
 * @param throwable terminal error (null on success).
 * @param context trace context the span lives in.
 * @param isForcedEmptyCompletion true when this is a synthetic completion; diagnostics
 *        publishing is skipped in that case but the span is still ended.
 */
private void end(
    CosmosDiagnosticsContext cosmosCtx,
    int statusCode,
    int subStatusCode,
    Integer actualItemCount,
    Double requestCharge,
    CosmosDiagnostics diagnostics,
    Throwable throwable,
    Context context,
    boolean isForcedEmptyCompletion) {
    checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
    // endOperation returning false means the context was already completed; do nothing then.
    if (ctxAccessor.endOperation(
        cosmosCtx,
        statusCode,
        subStatusCode,
        actualItemCount,
        requestCharge,
        diagnostics,
        throwable)) {
        if (!isForcedEmptyCompletion) {
            this.handleDiagnostics(context, cosmosCtx);
        }
        if (this.cosmosTracer != null) {
            this.cosmosTracer.endSpan(cosmosCtx, context, isForcedEmptyCompletion);
        }
    }
}
// Subscribes the downstream with the current trace span made current (when a trace
// context is present in the reactor context) so that nested instrumentation/logs
// correlate with the logical Cosmos span. DUMMY_VALUE drives the scalar subscription.
private static void subscribe(Tracer tracer, CoreSubscriber<? super Object> actual) {
    Context context = getContextFromReactorOrNull(actual.currentContext());
    if (context != null) {
        AutoCloseable scope = tracer.makeSpanCurrent(context);
        try {
            actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
        } finally {
            try {
                scope.close();
            } catch (Exception e) {
                // NOTE(review): throwing from this finally would mask any exception thrown
                // by onSubscribe above — consider logging only; confirm intended behavior.
                LOGGER.error("Unexpected failure closing tracer scope.", e);
                throw new IllegalStateException("Unexpected failure closing tracer scope.", e);
            }
        }
    } else {
        // No trace context available — plain subscription without span scoping.
        actual.onSubscribe(Operators.scalarSubscription(actual, DUMMY_VALUE));
    }
}
/**
 * Helper class allowing running Mono subscription (and anything on the hot path)
 * in scope of trace context. This enables OpenTelemetry auto-collection
 * to pick it up and correlate lower levels of instrumentation and logs
 * to logical Cosmos spans.
 * <br/>
 * OpenTelemetry reactor auto-instrumentation will take care of the cold path.
 */
private final class PropagatingMono extends Mono<Object> {
    @Override
    public void subscribe(CoreSubscriber<? super Object> actual) {
        // Delegates to the shared span-scoped subscription helper.
        DiagnosticsProvider.subscribe(tracer, actual);
    }
}
/**
 * Helper class allowing running Flux subscription (and anything on the hot path)
 * in scope of trace context. This enables OpenTelemetry auto-collection
 * to pick it up and correlate lower levels of instrumentation and logs
 * to logical Cosmos spans.
 * <br/>
 * OpenTelemetry reactor auto-instrumentation will take care of the cold path.
 */
private final class PropagatingFlux extends Flux<Object> {
    @Override
    public void subscribe(CoreSubscriber<? super Object> actual) {
        // Delegates to the shared span-scoped subscription helper.
        DiagnosticsProvider.subscribe(tracer, actual);
    }
}
// Internal abstraction over the two span-emission strategies implemented below
// (legacy attribute names vs. OpenTelemetry-style semantic conventions).
private interface CosmosTracer {
    // Starts a span for the operation and returns the context carrying the span.
    Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context);
    // Ends the span previously started via startSpan; isEmptyCompletion marks synthetic completions.
    void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion);
}
// Tracer implementation emitting the legacy (pre-OpenTelemetry) span attributes
// (DB_TYPE / LEGACY_DB_*) and attaching serialized diagnostics JSON as span events.
private static final class LegacyCosmosTracer implements CosmosTracer {
    // Attribute key under which serialized diagnostics payloads are attached to events.
    private final static String JSON_STRING = "JSON";
    private final Tracer tracer;
    public LegacyCosmosTracer(Tracer tracer) {
        checkNotNull(tracer, "Argument 'tracer' must not be null.");
        this.tracer = tracer;
    }
    // Starts a CLIENT span carrying the legacy db.* attributes.
    @Override
    public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
        checkNotNull(spanName, "Argument 'spanName' must not be null.");
        checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
        StartSpanOptions spanOptions = this.startSpanOptions(
            spanName,
            cosmosCtx.getDatabaseName(),
            ctxAccessor.getEndpoint(cosmosCtx));
        return tracer.start(spanName, spanOptions, context);
    }
    // Builds the legacy span attributes; databaseId is optional.
    private StartSpanOptions startSpanOptions(String methodName, String databaseId, String endpoint) {
        StartSpanOptions spanOptions = new StartSpanOptions(SpanKind.CLIENT)
            .setAttribute(DB_TYPE, DB_TYPE_VALUE)
            .setAttribute(LEGACY_DB_URL, endpoint)
            .setAttribute(LEGACY_DB_STATEMENT, methodName);
        if (databaseId != null) {
            spanOptions.setAttribute(LEGACY_DB_INSTANCE, databaseId);
        }
        return spanOptions;
    }
    // Ends the span; when diagnostics thresholds were violated the per-request
    // diagnostics are first attached as span events (best effort).
    @Override
    public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
        try {
            if (cosmosCtx != null && cosmosCtx.isThresholdViolated()) {
                Collection<CosmosDiagnostics> diagnostics = cosmosCtx.getDiagnostics();
                if (diagnostics != null && diagnostics.size() > 0) {
                    for (CosmosDiagnostics d: diagnostics) {
                        addDiagnosticsOnTracerEvent(d, context);
                    }
                }
            }
        } catch (JsonProcessingException ex) {
            // A serialization failure must not prevent ending the span.
            LOGGER.warn("Error while serializing diagnostics for tracer.", ex);
        }
        if (cosmosCtx != null) {
            tracer.end(cosmosCtx.getStatusCode(), cosmosCtx.getFinalError(), context);
        }
    }
    // Attaches the various sections of ClientSideRequestStatistics (store responses,
    // gateway stats, retries, address resolution, serialization, system info, ...)
    // as individual span events, each timestamped as close to the original request as possible.
    private void addClientSideRequestStatisticsOnTracerEvent(
        ClientSideRequestStatistics clientSideRequestStatistics,
        Context context) throws JsonProcessingException {
        if (clientSideRequestStatistics == null || context == null) {
            return;
        }
        Map<String, Object> attributes;
        int diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
            clientSideRequestStatistics.getResponseStatisticsList()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
            Iterator<RequestTimeline.Event> eventIterator = null;
            try {
                if (storeResponseStatistics.getStoreResult() != null) {
                    eventIterator = storeResponseStatistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline().iterator();
                }
            } catch (CosmosException ex) {
                // Timeline can also be carried on the exception for failed requests.
                eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
            }
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
                , ZoneOffset.UTC);
            if (eventIterator != null) {
                // Prefer the "created" timeline event as the event timestamp.
                while (eventIterator.hasNext()) {
                    RequestTimeline.Event event = eventIterator.next();
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }
        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
            ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
            OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
                ZoneOffset.UTC);
            if (statistics.getStoreResult() != null) {
                for (RequestTimeline.Event event :
                    statistics.getStoreResult().getStoreResponseDiagnostics().getRequestTimeline()) {
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
        }
        for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : clientSideRequestStatistics.getGatewayStatisticsList()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(gatewayStats));
            OffsetDateTime requestStartTime =
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
            if (gatewayStats.getRequestTimeline() != null) {
                for (RequestTimeline.Event event : gatewayStats.getRequestTimeline()) {
                    if (event.getName().equals(CREATED.getEventName())) {
                        requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
                        break;
                    }
                }
            }
            this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
        }
        if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING,
                mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
            this.addEvent("Retry Context", attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                    ZoneOffset.UTC), context);
        }
        diagnosticsCounter = 1;
        for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
            clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
            this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
                OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
        }
        if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
            for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
                clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
                attributes = new HashMap<>();
                attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
                this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                    OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
            }
        }
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getContactedRegionNames()));
        this.addEvent("RegionContacted", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
        this.addEvent("SystemInformation", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientConfig()));
        this.addEvent("ClientCfgs", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        // NOTE(review): the following events reuse the "ClientCfgs" attributes map built just
        // above (attributes is not reassigned here) — confirm that is intentional.
        if (clientSideRequestStatistics.getResponseStatisticsList() != null && clientSideRequestStatistics.getResponseStatisticsList().size() > 0
            && clientSideRequestStatistics.getResponseStatisticsList().iterator().next() != null) {
            String eventName =
                "Diagnostics for PKRange "
                    + clientSideRequestStatistics.getResponseStatisticsList().iterator().next().getStoreResult().getStoreResponseDiagnostics().getPartitionKeyRangeId();
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        } else if (clientSideRequestStatistics.getGatewayStatisticsList() != null && clientSideRequestStatistics.getGatewayStatisticsList().size() > 0) {
            String eventName =
                "Diagnostics for PKRange " + clientSideRequestStatistics.getGatewayStatisticsList().get(0).getPartitionKeyRangeId();
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        } else {
            String eventName = "Diagnostics ";
            this.addEvent(eventName, attributes,
                OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
        }
    }
    // Attaches query-plan / query-metrics events (for feed responses) and then the
    // point-operation client-side request statistics as span events.
    private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
        if (cosmosDiagnostics == null || context == null) {
            return;
        }
        Map<String, Object> attributes;
        FeedResponseDiagnostics feedResponseDiagnostics =
            diagnosticsAccessor.getFeedResponseDiagnostics(cosmosDiagnostics);
        if (feedResponseDiagnostics != null) {
            QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics = feedResponseDiagnostics
                .getQueryPlanDiagnosticsContext();
            if (queryPlanDiagnostics != null) {
                attributes = new HashMap<>();
                attributes.put("JSON",
                    mapper.writeValueAsString(queryPlanDiagnostics));
                this.addEvent(
                    "Query Plan Statistics",
                    attributes,
                    OffsetDateTime.ofInstant(queryPlanDiagnostics.getStartTimeUTC(), ZoneOffset.UTC),
                    context);
            }
            Map<String, QueryMetrics> queryMetrics = feedResponseDiagnostics.getQueryMetricsMap();
            if (queryMetrics != null && queryMetrics.size() > 0) {
                for(Map.Entry<String, QueryMetrics> entry : queryMetrics.entrySet()) {
                    attributes = new HashMap<>();
                    attributes.put("Query Metrics", entry.getValue().toString());
                    this.addEvent("Query Metrics for PKRange " + entry.getKey(), attributes,
                        OffsetDateTime.now(), context);
                }
            }
            for (ClientSideRequestStatistics c: feedResponseDiagnostics.getClientSideRequestStatistics()) {
                addClientSideRequestStatisticsOnTracerEvent(c, context);
            }
        }
        addClientSideRequestStatisticsOnTracerEvent(
            BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics),
            context);
    }
    // Thin wrapper so all events funnel through the same tracer call.
    void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
        tracer.addEvent(name, attributes, timestamp, context);
    }
}
/**
 * Emits the serialized diagnostics context as one or more tracer events.
 * The JSON payload is split into fragments no longer than the configured max trace
 * message length; each fragment carries the trigger and a 1-based, zero-padded
 * sequence number so consumers can reassemble the full payload.
 */
private static void emitDiagnosticsEvents(Tracer tracer, CosmosDiagnosticsContext cosmosCtx, String trigger, Context context) {
    String payload = trigger + " - CTX: " + cosmosCtx.toJson();
    List<String> fragments = Splitter.fixedLength(Configs.getMaxTraceMessageLength()).splitToList(payload);
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("Trigger", trigger);
    int sequence = 0;
    for (String fragment : fragments) {
        // The same map instance is reused; SequenceNumber is overwritten per fragment.
        attributes.put("SequenceNumber", String.format(Locale.ROOT,"%05d", ++sequence));
        tracer.addEvent(fragment, attributes, OffsetDateTime.now(), context);
    }
}
// Tracer implementation emitting OpenTelemetry-style semantic-convention attributes
// (db.system/db.operation/db.cosmosdb.*) plus optional transport-level (rntbd) events.
private final static class OpenTelemetryCosmosTracer implements CosmosTracer {
    private final Tracer tracer;
    private final CosmosClientTelemetryConfig config;
    private final String clientId;
    private final String connectionMode;
    private final String userAgent;
    public OpenTelemetryCosmosTracer(
        Tracer tracer,
        CosmosClientTelemetryConfig config,
        String clientId,
        String userAgent,
        String connectionMode) {
        checkNotNull(tracer, "Argument 'tracer' must not be null.");
        checkNotNull(config, "Argument 'config' must not be null.");
        checkNotNull(clientId, "Argument 'clientId' must not be null.");
        checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
        checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
        this.tracer = tracer;
        this.config = config;
        this.clientId = clientId;
        this.userAgent = userAgent;
        this.connectionMode = connectionMode;
    }
    private boolean isTransportLevelTracingEnabled() {
        return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
    }
    // Starts an INTERNAL span and stashes the diagnostics context on the returned Context.
    @Override
    public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
        checkNotNull(spanName, "Argument 'spanName' must not be null.");
        checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
        Context local = Objects
            .requireNonNull(context, "'context' cannot be null.")
            .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
        StartSpanOptions spanOptions = this.startSpanOptions(
            spanName,
            cosmosCtx);
        return tracer.start(spanName, spanOptions, local);
    }
    // Builds start options; attribute population is skipped for the enabled no-op tracer.
    private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
        StartSpanOptions spanOptions;
        if (tracer instanceof EnabledNoOpTracer) {
            spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
        } else {
            spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
                .setAttribute("db.system", "cosmosdb")
                .setAttribute("db.operation", spanName)
                .setAttribute("net.peer.name", cosmosCtx.getAccountName())
                .setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
                .setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
                .setAttribute("db.name", cosmosCtx.getDatabaseName())
                .setAttribute("db.cosmosdb.client_id", this.clientId)
                .setAttribute("user_agent.original", this.userAgent)
                .setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
            // Only record the operation id when it differs from the span name (avoids duplication).
            if (!cosmosCtx.getOperationId().isEmpty() &&
                !cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
                spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
            }
            String containerName = cosmosCtx.getContainerName();
            if (containerName != null) {
                spanOptions.setAttribute("db.cosmosdb.container", containerName);
            }
        }
        return spanOptions;
    }
    // Ends the span: records error attributes, emits diagnostics events on failure or
    // threshold violation, optionally attaches transport-level events, then closes the span.
    @Override
    public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
        if (cosmosCtx == null) {
            return;
        }
        if (!cosmosCtx.isCompleted()) {
            tracer.end("CosmosCtx not completed yet.", null, context);
            return;
        }
        String errorMessage = null;
        Throwable finalError = cosmosCtx.getFinalError();
        if (finalError != null && cosmosCtx.isFailure()) {
            if (finalError instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) finalError;
                errorMessage = cosmosException.getShortMessage();
            } else {
                errorMessage = finalError.getMessage();
            }
        }
        if (tracer instanceof EnabledNoOpTracer) {
            // No attributes/events for the no-op tracer; just close the span.
            tracer.end(errorMessage, finalError, context);
            return;
        }
        if (isEmptyCompletion) {
            tracer.setAttribute(
                "db.cosmosdb.is_empty_completion",
                Boolean.toString(true),
                context);
            tracer.end(errorMessage, finalError, context);
            return;
        }
        if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
            String trigger;
            if (cosmosCtx.isFailure()) {
                trigger = "Failure";
            } else {
                trigger = "ThresholdViolation";
            }
            emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
        }
        if (finalError != null) {
            String exceptionType;
            if (finalError instanceof CosmosException) {
                exceptionType = CosmosException.class.getCanonicalName();
            } else {
                exceptionType = finalError.getClass().getCanonicalName();
            }
            tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
            tracer.setAttribute("exception.type", exceptionType, context);
            if (errorMessage != null) {
                tracer.setAttribute("exception.message", errorMessage, context);
            }
            tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
        }
        if (this.isTransportLevelTracingEnabled()) {
            traceTransportLevel(cosmosCtx, context);
        }
        tracer.setAttribute(
            "db.cosmosdb.status_code",
            Integer.toString(cosmosCtx.getStatusCode()),
            context);
        tracer.setAttribute(
            "db.cosmosdb.sub_status_code",
            Integer.toString(cosmosCtx.getSubStatusCode()),
            context);
        tracer.setAttribute(
            "db.cosmosdb.request_charge",
            Float.toString(cosmosCtx.getTotalRequestCharge()),
            context);
        tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
        tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
        tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
        Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
        if (!regionsContacted.isEmpty()) {
            tracer.setAttribute(
                "db.cosmosdb.regions_contacted",
                String.join(", ", regionsContacted),
                context);
        }
        tracer.end(errorMessage, finalError, context);
    }
    // Emits one "rntbd.request" event per store response with per-request transport attributes.
    private void recordStoreResponseStatistics(
        Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
        Context context) {
        for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
            StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
            StoreResponseDiagnostics storeResponseDiagnostics =
                storeResultDiagnostics.getStoreResponseDiagnostics();
            Map<String, Object> attributes = new HashMap<>();
            attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
            attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
            attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
            attributes.put("rntbd.region", responseStatistics.getRegionName());
            if (storeResultDiagnostics.getLsn() > 0) {
                attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
            }
            if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
                attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
            }
            // NOTE(review): both session-token attributes below read getRequestSessionToken();
            // a response-session-token getter may have been intended for the first — confirm.
            String responseSessionToken = responseStatistics.getRequestSessionToken();
            if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
                attributes.put("rntbd.session_token", responseSessionToken);
            }
            String requestSessionToken = responseStatistics.getRequestSessionToken();
            if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
                attributes.put("rntbd.request_session_token", requestSessionToken);
            }
            String activityId = storeResponseDiagnostics.getActivityId();
            if (activityId != null && !activityId.isEmpty()) {
                attributes.put("rntbd.activity_id", activityId);
            }
            String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
            if (pkRangeId != null && !pkRangeId.isEmpty()) {
                attributes.put("rntbd.partition_key_range_id", pkRangeId);
            }
            attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
            if (storeResponseDiagnostics.getSubStatusCode() != 0) {
                attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
            }
            if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
                attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
            }
            Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
            if (backendLatency != null) {
                attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
            }
            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            attributes.put("rntbd.request_charge", Double.toString(requestCharge));
            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                attributes.put("rntbd.latency", latency.toString());
            }
            if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
                attributes.put(
                    "rntbd.is_new_channel",
                    storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
            }
            OffsetDateTime startTime = null;
            for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
                OffsetDateTime eventTime = event.getStartTime() != null ?
                    event.getStartTime().atOffset(ZoneOffset.UTC) : null;
                // NOTE(review): this condition keeps the LATEST event time as "startTime",
                // not the earliest — confirm that is the intended event timestamp.
                if (eventTime != null &&
                    (startTime == null || startTime.isBefore(eventTime))) {
                    startTime = eventTime;
                }
                Duration duration = event.getDuration();
                if (duration == null || duration == Duration.ZERO) {
                    continue;
                }
                attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
            }
            attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
            attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
            this.tracer.addEvent(
                "rntbd.request",
                attributes,
                startTime != null ? startTime : OffsetDateTime.now(),
                context);
        }
    }
    // Records both primary and supplemental store responses for each request statistics entry.
    private void traceTransportLevelRequests(
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
        Context context) {
        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    requestStatistics.getResponseStatisticsList(),
                    context);
                recordStoreResponseStatistics(
                    requestStatistics.getSupplementalResponseStatisticsList(),
                    context);
            }
        }
    }
    // Entry point for transport-level tracing over the de-duplicated request statistics.
    private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
        Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
            ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);
        traceTransportLevelRequests(
            combinedClientSideRequestStatistics,
            context);
    }
}
/**
 * Renders the stack trace of {@code e} without the leading {@code e.toString()} header,
 * leaving only the line separator and the frame ("\tat ...") lines.
 *
 * @param e the throwable to render; must not be null.
 * @return the stack trace with the {@code toString()} prefix stripped.
 */
public static String prettifyCallstack(Throwable e) {
    // Both writers are purely in-memory: StringWriter.close() is documented as a no-op,
    // so the previous close/warn ceremony (which also double-closed the StringWriter via
    // PrintWriter.close()) is unnecessary.
    StringWriter stackWriter = new StringWriter();
    PrintWriter printWriter = new PrintWriter(stackWriter);
    e.printStackTrace(printWriter);
    printWriter.flush();
    String prettifiedCallstack = stackWriter.toString();
    // printStackTrace emits "e.toString()" followed by the frame lines; drop that prefix.
    String message = e.toString();
    if (prettifiedCallstack.length() > message.length()) {
        prettifiedCallstack = prettifiedCallstack.substring(message.length());
    }
    printWriter.close();
    return prettifiedCallstack;
}
/**
 * Routes an unexpected throwable: JVM Errors go through the fatal-error handling
 * (mapping / System.exit / rethrow), everything else is logged and rethrown wrapped
 * in a RuntimeException.
 */
private void handleErrors(Throwable throwable, int systemExitCode) {
    if (!(throwable instanceof Error)) {
        LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
        throw new RuntimeException(throwable);
    }
    handleFatalError((Error) throwable, systemExitCode);
}
// Handles JVM Errors: first tries to map the Error to an Exception via the configured
// mapper (and rethrows it wrapped); otherwise either terminates the process (when
// shouldSystemExitOnError is set) or rethrows the original Error.
private void handleFatalError(Error error, int systemExitCode) {
    Exception exception = DiagnosticsProviderJvmFatalErrorMapper.getMapper().mapFatalError(error);
    if (exception != null) {
        String errorMessage = "Runtime exception mapped from fatal error " + error;
        throw new RuntimeException(errorMessage, exception);
    }
    if (this.shouldSystemExitOnError) {
        // Log to both the logger and stderr: the process is about to terminate and
        // async log appenders may not get a chance to flush.
        LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. Calling System.exit({})...", systemExitCode, error);
        System.err.println(
            String.format(
                "Unexpected error in DiagnosticsProvider.endSpan. Calling System.exit(%d)... %s",
                systemExitCode,
                error)
        );
        System.exit(systemExitCode);
    } else {
        throw error;
    }
}
// A Tracer that keeps the diagnostics pipeline "enabled" while discarding all span
// data. Used so diagnostics context handling still runs when no real tracer is wired up.
private static final class EnabledNoOpTracer implements Tracer {
    public static final Tracer INSTANCE = new EnabledNoOpTracer();
    private EnabledNoOpTracer() {
    }
    // start() passes the incoming context through untouched so downstream lookups still work.
    @Override
    public Context start(String methodName, Context context) {
        return context;
    }
    @Override
    public Context start(String methodName, Context context, ProcessKind processKind) {
        return context;
    }
    // All end/attribute/link operations are intentional no-ops.
    @Override
    public void end(int responseCode, Throwable error, Context context) {
    }
    @Override
    public void end(String errorCondition, Throwable error, Context context) {
    }
    @Override
    public void setAttribute(String key, String value, Context context) {
    }
    @Override
    public Context setSpanName(String spanName, Context context) {
        return Context.NONE;
    }
    @Override
    public void addLink(Context context) {
    }
    @Override
    public Context extractContext(String diagnosticId, Context context) {
        return Context.NONE;
    }
    @Override
    public Context getSharedSpanBuilder(String spanName, Context context) {
        return Context.NONE;
    }
}
} |
Do we still need this since we call `System.err.println` in the next line of code? | private void handleErrors(Throwable throwable, int systemExitCode) {
if (throwable instanceof Error) {
LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. ", throwable);
System.err.println("Unexpected error in DiagnosticsProvider.endSpan. " + throwable);
System.exit(systemExitCode);
} else {
LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
throw new RuntimeException(throwable);
}
} | LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. ", throwable); | private void handleErrors(Throwable throwable, int systemExitCode) {
if (throwable instanceof Error) {
handleFatalError((Error) throwable, systemExitCode);
} else {
LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
throw new RuntimeException(throwable);
}
} | class OpenTelemetryCosmosTracer implements CosmosTracer {
private final Tracer tracer;
private final CosmosClientTelemetryConfig config;
private final String clientId;
private final String connectionMode;
private final String userAgent;
public OpenTelemetryCosmosTracer(
Tracer tracer,
CosmosClientTelemetryConfig config,
String clientId,
String userAgent,
String connectionMode) {
checkNotNull(tracer, "Argument 'tracer' must not be null.");
checkNotNull(config, "Argument 'config' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.tracer = tracer;
this.config = config;
this.clientId = clientId;
this.userAgent = userAgent;
this.connectionMode = connectionMode;
}
private boolean isTransportLevelTracingEnabled() {
return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
}
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
StartSpanOptions spanOptions = this.startSpanOptions(
spanName,
cosmosCtx);
return tracer.start(spanName, spanOptions, local);
}
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
StartSpanOptions spanOptions;
if (tracer instanceof EnabledNoOpTracer) {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
} else {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
.setAttribute("db.system", "cosmosdb")
.setAttribute("db.operation", spanName)
.setAttribute("net.peer.name", cosmosCtx.getAccountName())
.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
.setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
.setAttribute("db.name", cosmosCtx.getDatabaseName())
.setAttribute("db.cosmosdb.client_id", this.clientId)
.setAttribute("user_agent.original", this.userAgent)
.setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
if (!cosmosCtx.getOperationId().isEmpty() &&
!cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
}
String containerName = cosmosCtx.getContainerName();
if (containerName != null) {
spanOptions.setAttribute("db.cosmosdb.container", containerName);
}
}
return spanOptions;
}
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
if (cosmosCtx == null) {
return;
}
if (!cosmosCtx.isCompleted()) {
tracer.end("CosmosCtx not completed yet.", null, context);
return;
}
String errorMessage = null;
Throwable finalError = cosmosCtx.getFinalError();
if (finalError != null && cosmosCtx.isFailure()) {
if (finalError instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) finalError;
errorMessage = cosmosException.getShortMessage();
} else {
errorMessage = finalError.getMessage();
}
}
if (tracer instanceof EnabledNoOpTracer) {
tracer.end(errorMessage, finalError, context);
return;
}
if (isEmptyCompletion) {
tracer.setAttribute(
"db.cosmosdb.is_empty_completion",
Boolean.toString(true),
context);
tracer.end(errorMessage, finalError, context);
return;
}
if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
String trigger;
if (cosmosCtx.isFailure()) {
trigger = "Failure";
} else {
trigger = "ThresholdViolation";
}
emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
}
if (finalError != null) {
String exceptionType;
if (finalError instanceof CosmosException) {
exceptionType = CosmosException.class.getCanonicalName();
} else {
exceptionType = finalError.getClass().getCanonicalName();
}
tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
tracer.setAttribute("exception.type", exceptionType, context);
if (errorMessage != null) {
tracer.setAttribute("exception.message", errorMessage, context);
}
tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
}
if (this.isTransportLevelTracingEnabled()) {
traceTransportLevel(cosmosCtx, context);
}
tracer.setAttribute(
"db.cosmosdb.status_code",
Integer.toString(cosmosCtx.getStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.sub_status_code",
Integer.toString(cosmosCtx.getSubStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.request_charge",
Float.toString(cosmosCtx.getTotalRequestCharge()),
context);
tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
if (!regionsContacted.isEmpty()) {
tracer.setAttribute(
"db.cosmosdb.regions_contacted",
String.join(", ", regionsContacted),
context);
}
tracer.end(errorMessage, finalError, context);
}
private void recordStoreResponseStatistics(
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
Context context) {
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Map<String, Object> attributes = new HashMap<>();
attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
attributes.put("rntbd.region", responseStatistics.getRegionName());
if (storeResultDiagnostics.getLsn() > 0) {
attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
}
if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
}
String responseSessionToken = responseStatistics.getRequestSessionToken();
if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
attributes.put("rntbd.session_token", responseSessionToken);
}
String requestSessionToken = responseStatistics.getRequestSessionToken();
if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
attributes.put("rntbd.request_session_token", requestSessionToken);
}
String activityId = storeResponseDiagnostics.getActivityId();
if (activityId != null && !activityId.isEmpty()) {
attributes.put("rntbd.activity_id", activityId);
}
String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
if (pkRangeId != null && !pkRangeId.isEmpty()) {
attributes.put("rntbd.partition_key_range_id", pkRangeId);
}
attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
if (storeResponseDiagnostics.getSubStatusCode() != 0) {
attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
}
if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
}
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
}
double requestCharge = storeResponseDiagnostics.getRequestCharge();
attributes.put("rntbd.request_charge", Double.toString(requestCharge));
Duration latency = responseStatistics.getDuration();
if (latency != null) {
attributes.put("rntbd.latency", latency.toString());
}
if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
attributes.put(
"rntbd.is_new_channel",
storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
}
OffsetDateTime startTime = null;
for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
OffsetDateTime eventTime = event.getStartTime() != null ?
event.getStartTime().atOffset(ZoneOffset.UTC) : null;
if (eventTime != null &&
(startTime == null || startTime.isBefore(eventTime))) {
startTime = eventTime;
}
Duration duration = event.getDuration();
if (duration == null || duration == Duration.ZERO) {
continue;
}
attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
}
attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
this.tracer.addEvent(
"rntbd.request",
attributes,
startTime != null ? startTime : OffsetDateTime.now(),
context);
}
}
private void traceTransportLevelRequests(
Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
Context context) {
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
requestStatistics.getResponseStatisticsList(),
context);
recordStoreResponseStatistics(
requestStatistics.getSupplementalResponseStatisticsList(),
context);
}
}
}
private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);
traceTransportLevelRequests(
combinedClientSideRequestStatistics,
context);
}
} | class OpenTelemetryCosmosTracer implements CosmosTracer {
private final Tracer tracer;
private final CosmosClientTelemetryConfig config;
private final String clientId;
private final String connectionMode;
private final String userAgent;
public OpenTelemetryCosmosTracer(
Tracer tracer,
CosmosClientTelemetryConfig config,
String clientId,
String userAgent,
String connectionMode) {
checkNotNull(tracer, "Argument 'tracer' must not be null.");
checkNotNull(config, "Argument 'config' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.tracer = tracer;
this.config = config;
this.clientId = clientId;
this.userAgent = userAgent;
this.connectionMode = connectionMode;
}
private boolean isTransportLevelTracingEnabled() {
return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
}
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
StartSpanOptions spanOptions = this.startSpanOptions(
spanName,
cosmosCtx);
return tracer.start(spanName, spanOptions, local);
}
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
StartSpanOptions spanOptions;
if (tracer instanceof EnabledNoOpTracer) {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
} else {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
.setAttribute("db.system", "cosmosdb")
.setAttribute("db.operation", spanName)
.setAttribute("net.peer.name", cosmosCtx.getAccountName())
.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
.setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
.setAttribute("db.name", cosmosCtx.getDatabaseName())
.setAttribute("db.cosmosdb.client_id", this.clientId)
.setAttribute("user_agent.original", this.userAgent)
.setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
if (!cosmosCtx.getOperationId().isEmpty() &&
!cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
}
String containerName = cosmosCtx.getContainerName();
if (containerName != null) {
spanOptions.setAttribute("db.cosmosdb.container", containerName);
}
}
return spanOptions;
}
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
if (cosmosCtx == null) {
return;
}
if (!cosmosCtx.isCompleted()) {
tracer.end("CosmosCtx not completed yet.", null, context);
return;
}
String errorMessage = null;
Throwable finalError = cosmosCtx.getFinalError();
if (finalError != null && cosmosCtx.isFailure()) {
if (finalError instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) finalError;
errorMessage = cosmosException.getShortMessage();
} else {
errorMessage = finalError.getMessage();
}
}
if (tracer instanceof EnabledNoOpTracer) {
tracer.end(errorMessage, finalError, context);
return;
}
if (isEmptyCompletion) {
tracer.setAttribute(
"db.cosmosdb.is_empty_completion",
Boolean.toString(true),
context);
tracer.end(errorMessage, finalError, context);
return;
}
if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
String trigger;
if (cosmosCtx.isFailure()) {
trigger = "Failure";
} else {
trigger = "ThresholdViolation";
}
emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
}
if (finalError != null) {
String exceptionType;
if (finalError instanceof CosmosException) {
exceptionType = CosmosException.class.getCanonicalName();
} else {
exceptionType = finalError.getClass().getCanonicalName();
}
tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
tracer.setAttribute("exception.type", exceptionType, context);
if (errorMessage != null) {
tracer.setAttribute("exception.message", errorMessage, context);
}
tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
}
if (this.isTransportLevelTracingEnabled()) {
traceTransportLevel(cosmosCtx, context);
}
tracer.setAttribute(
"db.cosmosdb.status_code",
Integer.toString(cosmosCtx.getStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.sub_status_code",
Integer.toString(cosmosCtx.getSubStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.request_charge",
Float.toString(cosmosCtx.getTotalRequestCharge()),
context);
tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
if (!regionsContacted.isEmpty()) {
tracer.setAttribute(
"db.cosmosdb.regions_contacted",
String.join(", ", regionsContacted),
context);
}
tracer.end(errorMessage, finalError, context);
}
private void recordStoreResponseStatistics(
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
Context context) {
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Map<String, Object> attributes = new HashMap<>();
attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
attributes.put("rntbd.region", responseStatistics.getRegionName());
if (storeResultDiagnostics.getLsn() > 0) {
attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
}
if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
}
String responseSessionToken = responseStatistics.getRequestSessionToken();
if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
attributes.put("rntbd.session_token", responseSessionToken);
}
String requestSessionToken = responseStatistics.getRequestSessionToken();
if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
attributes.put("rntbd.request_session_token", requestSessionToken);
}
String activityId = storeResponseDiagnostics.getActivityId();
if (activityId != null && !activityId.isEmpty()) {
attributes.put("rntbd.activity_id", activityId);
}
String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
if (pkRangeId != null && !pkRangeId.isEmpty()) {
attributes.put("rntbd.partition_key_range_id", pkRangeId);
}
attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
if (storeResponseDiagnostics.getSubStatusCode() != 0) {
attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
}
if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
}
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
}
double requestCharge = storeResponseDiagnostics.getRequestCharge();
attributes.put("rntbd.request_charge", Double.toString(requestCharge));
Duration latency = responseStatistics.getDuration();
if (latency != null) {
attributes.put("rntbd.latency", latency.toString());
}
if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
attributes.put(
"rntbd.is_new_channel",
storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
}
OffsetDateTime startTime = null;
for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
OffsetDateTime eventTime = event.getStartTime() != null ?
event.getStartTime().atOffset(ZoneOffset.UTC) : null;
if (eventTime != null &&
(startTime == null || startTime.isBefore(eventTime))) {
startTime = eventTime;
}
Duration duration = event.getDuration();
if (duration == null || duration == Duration.ZERO) {
continue;
}
attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
}
attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
this.tracer.addEvent(
"rntbd.request",
attributes,
startTime != null ? startTime : OffsetDateTime.now(),
context);
}
}
private void traceTransportLevelRequests(
Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
Context context) {
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
requestStatistics.getResponseStatisticsList(),
context);
recordStoreResponseStatistics(
requestStatistics.getSupplementalResponseStatisticsList(),
context);
}
}
}
private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);
traceTransportLevelRequests(
combinedClientSideRequestStatistics,
context);
}
} |
want to keep the logger logging if they can show properly so for some customers they do not need to look two places to figure out the issue. The System.err is a fallback if the logging does not show up. Do not see a risk here for keeping both. | private void handleErrors(Throwable throwable, int systemExitCode) {
if (throwable instanceof Error) {
LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. ", throwable);
System.err.println("Unexpected error in DiagnosticsProvider.endSpan. " + throwable);
System.exit(systemExitCode);
} else {
LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
throw new RuntimeException(throwable);
}
} | LOGGER.error("Unexpected error in DiagnosticsProvider.endSpan. ", throwable); | private void handleErrors(Throwable throwable, int systemExitCode) {
if (throwable instanceof Error) {
handleFatalError((Error) throwable, systemExitCode);
} else {
LOGGER.error("Unexpected exception in DiagnosticsProvider.endSpan. ", throwable);
throw new RuntimeException(throwable);
}
} | class OpenTelemetryCosmosTracer implements CosmosTracer {
private final Tracer tracer;
private final CosmosClientTelemetryConfig config;
private final String clientId;
private final String connectionMode;
private final String userAgent;
public OpenTelemetryCosmosTracer(
Tracer tracer,
CosmosClientTelemetryConfig config,
String clientId,
String userAgent,
String connectionMode) {
checkNotNull(tracer, "Argument 'tracer' must not be null.");
checkNotNull(config, "Argument 'config' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.tracer = tracer;
this.config = config;
this.clientId = clientId;
this.userAgent = userAgent;
this.connectionMode = connectionMode;
}
private boolean isTransportLevelTracingEnabled() {
return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
}
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
StartSpanOptions spanOptions = this.startSpanOptions(
spanName,
cosmosCtx);
return tracer.start(spanName, spanOptions, local);
}
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
StartSpanOptions spanOptions;
if (tracer instanceof EnabledNoOpTracer) {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
} else {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
.setAttribute("db.system", "cosmosdb")
.setAttribute("db.operation", spanName)
.setAttribute("net.peer.name", cosmosCtx.getAccountName())
.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
.setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
.setAttribute("db.name", cosmosCtx.getDatabaseName())
.setAttribute("db.cosmosdb.client_id", this.clientId)
.setAttribute("user_agent.original", this.userAgent)
.setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
if (!cosmosCtx.getOperationId().isEmpty() &&
!cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
}
String containerName = cosmosCtx.getContainerName();
if (containerName != null) {
spanOptions.setAttribute("db.cosmosdb.container", containerName);
}
}
return spanOptions;
}
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
if (cosmosCtx == null) {
return;
}
if (!cosmosCtx.isCompleted()) {
tracer.end("CosmosCtx not completed yet.", null, context);
return;
}
String errorMessage = null;
Throwable finalError = cosmosCtx.getFinalError();
if (finalError != null && cosmosCtx.isFailure()) {
if (finalError instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) finalError;
errorMessage = cosmosException.getShortMessage();
} else {
errorMessage = finalError.getMessage();
}
}
if (tracer instanceof EnabledNoOpTracer) {
tracer.end(errorMessage, finalError, context);
return;
}
if (isEmptyCompletion) {
tracer.setAttribute(
"db.cosmosdb.is_empty_completion",
Boolean.toString(true),
context);
tracer.end(errorMessage, finalError, context);
return;
}
if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
String trigger;
if (cosmosCtx.isFailure()) {
trigger = "Failure";
} else {
trigger = "ThresholdViolation";
}
emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
}
if (finalError != null) {
String exceptionType;
if (finalError instanceof CosmosException) {
exceptionType = CosmosException.class.getCanonicalName();
} else {
exceptionType = finalError.getClass().getCanonicalName();
}
tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
tracer.setAttribute("exception.type", exceptionType, context);
if (errorMessage != null) {
tracer.setAttribute("exception.message", errorMessage, context);
}
tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
}
if (this.isTransportLevelTracingEnabled()) {
traceTransportLevel(cosmosCtx, context);
}
tracer.setAttribute(
"db.cosmosdb.status_code",
Integer.toString(cosmosCtx.getStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.sub_status_code",
Integer.toString(cosmosCtx.getSubStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.request_charge",
Float.toString(cosmosCtx.getTotalRequestCharge()),
context);
tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
if (!regionsContacted.isEmpty()) {
tracer.setAttribute(
"db.cosmosdb.regions_contacted",
String.join(", ", regionsContacted),
context);
}
tracer.end(errorMessage, finalError, context);
}
private void recordStoreResponseStatistics(
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
Context context) {
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Map<String, Object> attributes = new HashMap<>();
attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
attributes.put("rntbd.region", responseStatistics.getRegionName());
if (storeResultDiagnostics.getLsn() > 0) {
attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
}
if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
}
String responseSessionToken = responseStatistics.getRequestSessionToken();
if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
attributes.put("rntbd.session_token", responseSessionToken);
}
String requestSessionToken = responseStatistics.getRequestSessionToken();
if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
attributes.put("rntbd.request_session_token", requestSessionToken);
}
String activityId = storeResponseDiagnostics.getActivityId();
if (activityId != null && !activityId.isEmpty()) {
attributes.put("rntbd.activity_id", activityId);
}
String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
if (pkRangeId != null && !pkRangeId.isEmpty()) {
attributes.put("rntbd.partition_key_range_id", pkRangeId);
}
attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
if (storeResponseDiagnostics.getSubStatusCode() != 0) {
attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
}
if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
}
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
}
double requestCharge = storeResponseDiagnostics.getRequestCharge();
attributes.put("rntbd.request_charge", Double.toString(requestCharge));
Duration latency = responseStatistics.getDuration();
if (latency != null) {
attributes.put("rntbd.latency", latency.toString());
}
if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
attributes.put(
"rntbd.is_new_channel",
storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
}
OffsetDateTime startTime = null;
for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
OffsetDateTime eventTime = event.getStartTime() != null ?
event.getStartTime().atOffset(ZoneOffset.UTC) : null;
if (eventTime != null &&
(startTime == null || startTime.isBefore(eventTime))) {
startTime = eventTime;
}
Duration duration = event.getDuration();
if (duration == null || duration == Duration.ZERO) {
continue;
}
attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
}
attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
this.tracer.addEvent(
"rntbd.request",
attributes,
startTime != null ? startTime : OffsetDateTime.now(),
context);
}
}
private void traceTransportLevelRequests(
Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
Context context) {
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
requestStatistics.getResponseStatisticsList(),
context);
recordStoreResponseStatistics(
requestStatistics.getSupplementalResponseStatisticsList(),
context);
}
}
}
private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);
traceTransportLevelRequests(
combinedClientSideRequestStatistics,
context);
}
} | class OpenTelemetryCosmosTracer implements CosmosTracer {
private final Tracer tracer;
private final CosmosClientTelemetryConfig config;
private final String clientId;
private final String connectionMode;
private final String userAgent;
public OpenTelemetryCosmosTracer(
Tracer tracer,
CosmosClientTelemetryConfig config,
String clientId,
String userAgent,
String connectionMode) {
checkNotNull(tracer, "Argument 'tracer' must not be null.");
checkNotNull(config, "Argument 'config' must not be null.");
checkNotNull(clientId, "Argument 'clientId' must not be null.");
checkNotNull(userAgent, "Argument 'userAgent' must not be null.");
checkNotNull(connectionMode, "Argument 'connectionMode' must not be null.");
this.tracer = tracer;
this.config = config;
this.clientId = clientId;
this.userAgent = userAgent;
this.connectionMode = connectionMode;
}
private boolean isTransportLevelTracingEnabled() {
return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config);
}
@Override
public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) {
checkNotNull(spanName, "Argument 'spanName' must not be null.");
checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null.");
Context local = Objects
.requireNonNull(context, "'context' cannot be null.")
.addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx);
StartSpanOptions spanOptions = this.startSpanOptions(
spanName,
cosmosCtx);
return tracer.start(spanName, spanOptions, local);
}
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) {
StartSpanOptions spanOptions;
if (tracer instanceof EnabledNoOpTracer) {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL);
} else {
spanOptions = new StartSpanOptions(SpanKind.INTERNAL)
.setAttribute("db.system", "cosmosdb")
.setAttribute("db.operation", spanName)
.setAttribute("net.peer.name", cosmosCtx.getAccountName())
.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType())
.setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType())
.setAttribute("db.name", cosmosCtx.getDatabaseName())
.setAttribute("db.cosmosdb.client_id", this.clientId)
.setAttribute("user_agent.original", this.userAgent)
.setAttribute("db.cosmosdb.connection_mode", this.connectionMode);
if (!cosmosCtx.getOperationId().isEmpty() &&
!cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) {
spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId());
}
String containerName = cosmosCtx.getContainerName();
if (containerName != null) {
spanOptions.setAttribute("db.cosmosdb.container", containerName);
}
}
return spanOptions;
}
@Override
public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context, boolean isEmptyCompletion) {
if (cosmosCtx == null) {
return;
}
if (!cosmosCtx.isCompleted()) {
tracer.end("CosmosCtx not completed yet.", null, context);
return;
}
String errorMessage = null;
Throwable finalError = cosmosCtx.getFinalError();
if (finalError != null && cosmosCtx.isFailure()) {
if (finalError instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) finalError;
errorMessage = cosmosException.getShortMessage();
} else {
errorMessage = finalError.getMessage();
}
}
if (tracer instanceof EnabledNoOpTracer) {
tracer.end(errorMessage, finalError, context);
return;
}
if (isEmptyCompletion) {
tracer.setAttribute(
"db.cosmosdb.is_empty_completion",
Boolean.toString(true),
context);
tracer.end(errorMessage, finalError, context);
return;
}
if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) {
String trigger;
if (cosmosCtx.isFailure()) {
trigger = "Failure";
} else {
trigger = "ThresholdViolation";
}
emitDiagnosticsEvents(tracer, cosmosCtx, trigger, context);
}
if (finalError != null) {
String exceptionType;
if (finalError instanceof CosmosException) {
exceptionType = CosmosException.class.getCanonicalName();
} else {
exceptionType = finalError.getClass().getCanonicalName();
}
tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context);
tracer.setAttribute("exception.type", exceptionType, context);
if (errorMessage != null) {
tracer.setAttribute("exception.message", errorMessage, context);
}
tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context);
}
if (this.isTransportLevelTracingEnabled()) {
traceTransportLevel(cosmosCtx, context);
}
tracer.setAttribute(
"db.cosmosdb.status_code",
Integer.toString(cosmosCtx.getStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.sub_status_code",
Integer.toString(cosmosCtx.getSubStatusCode()),
context);
tracer.setAttribute(
"db.cosmosdb.request_charge",
Float.toString(cosmosCtx.getTotalRequestCharge()),
context);
tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context);
tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context);
Set<String> regionsContacted = cosmosCtx.getContactedRegionNames();
if (!regionsContacted.isEmpty()) {
tracer.setAttribute(
"db.cosmosdb.regions_contacted",
String.join(", ", regionsContacted),
context);
}
tracer.end(errorMessage, finalError, context);
}
private void recordStoreResponseStatistics(
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics,
Context context) {
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Map<String, Object> attributes = new HashMap<>();
attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString());
attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString());
attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString());
attributes.put("rntbd.region", responseStatistics.getRegionName());
if (storeResultDiagnostics.getLsn() > 0) {
attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn()));
}
if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) {
attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN()));
}
String responseSessionToken = responseStatistics.getRequestSessionToken();
if (responseSessionToken != null && !responseSessionToken.isEmpty()) {
attributes.put("rntbd.session_token", responseSessionToken);
}
String requestSessionToken = responseStatistics.getRequestSessionToken();
if (requestSessionToken != null && !requestSessionToken.isEmpty()) {
attributes.put("rntbd.request_session_token", requestSessionToken);
}
String activityId = storeResponseDiagnostics.getActivityId();
if (activityId != null && !activityId.isEmpty()) {
attributes.put("rntbd.activity_id", activityId);
}
String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId();
if (pkRangeId != null && !pkRangeId.isEmpty()) {
attributes.put("rntbd.partition_key_range_id", pkRangeId);
}
attributes.put("rntbd.status_code", Integer.toString(storeResponseDiagnostics.getStatusCode()));
if (storeResponseDiagnostics.getSubStatusCode() != 0) {
attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode()));
}
if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) {
attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId());
}
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
attributes.put("rntbd.backend_latency", Double.toString(backendLatency));
}
double requestCharge = storeResponseDiagnostics.getRequestCharge();
attributes.put("rntbd.request_charge", Double.toString(requestCharge));
Duration latency = responseStatistics.getDuration();
if (latency != null) {
attributes.put("rntbd.latency", latency.toString());
}
if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) {
attributes.put(
"rntbd.is_new_channel",
storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit());
}
OffsetDateTime startTime = null;
for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) {
OffsetDateTime eventTime = event.getStartTime() != null ?
event.getStartTime().atOffset(ZoneOffset.UTC) : null;
if (eventTime != null &&
(startTime == null || startTime.isBefore(eventTime))) {
startTime = eventTime;
}
Duration duration = event.getDuration();
if (duration == null || duration == Duration.ZERO) {
continue;
}
attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString());
}
attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength());
attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength());
this.tracer.addEvent(
"rntbd.request",
attributes,
startTime != null ? startTime : OffsetDateTime.now(),
context);
}
}
private void traceTransportLevelRequests(
Collection<ClientSideRequestStatistics> clientSideRequestStatistics,
Context context) {
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
requestStatistics.getResponseStatisticsList(),
context);
recordStoreResponseStatistics(
requestStatistics.getSupplementalResponseStatisticsList(),
context);
}
}
}
private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) {
Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics =
ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext);
traceTransportLevelRequests(
combinedClientSideRequestStatistics,
context);
}
} |
please use the SystemProperty with fallback to env variable and fallback to default pattern. | public static boolean shouldDiagnosticsProviderSystemExitOnError() {
return getJVMConfigAsBoolean(
DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR,
DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR);
} | return getJVMConfigAsBoolean( | public static boolean shouldDiagnosticsProviderSystemExitOnError() {
String shouldSystemExit =
System.getProperty(
DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR,
firstNonNull(
emptyToNull(System.getenv().get(DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR)),
String.valueOf(DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR)));
return Boolean.parseBoolean(shouldSystemExit);
} | class Configs {
private static final Logger logger = LoggerFactory.getLogger(Configs.class);
/**
* Integer value specifying the speculation type
* <pre>
* 0 - No speculation
* 1 - Threshold based speculation
* </pre>
*/
public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE";
public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD";
public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP";
private final SslContext sslContext;
private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL";
private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol";
private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP;
private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS";
private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS";
private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES";
private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES";
private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES";
private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES";
private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT";
private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS";
public static final int DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE = 1000;
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE = "COSMOS.DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE = "COSMOS_DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final boolean DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT = false;
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED = "COSMOS.E2E_FOR_NON_POINT_DISABLED";
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE = "COSMOS_E2E_FOR_NON_POINT_DISABLED";
public static final int DEFAULT_HTTP_MAX_REQUEST_TIMEOUT = 60;
public static final String HTTP_MAX_REQUEST_TIMEOUT = "COSMOS.HTTP_MAX_REQUEST_TIMEOUT";
public static final String HTTP_MAX_REQUEST_TIMEOUT_VARIABLE = "COSMOS_HTTP_MAX_REQUEST_TIMEOUT";
private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS";
private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY";
private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG";
private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS";
private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT";
private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME";
private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED";
private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60;
private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60;
private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024;
private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096;
private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192;
private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024;
private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6;
private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5;
private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30;
private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30;
private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4;
private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10;
private static final int CPU_CNT = Runtime.getRuntime().availableProcessors();
private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500;
private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60;
private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final Duration CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45);
private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool";
private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60;
private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5;
private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 500;
public static final int MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 100;
private static final String DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_IN_REGION-RETRY_TIME_IN_MILLISECONDS";
private static final int DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 500;
private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE";
private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false;
private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED";
private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false;
private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING";
private static final boolean DEFAULT_USE_LEGACY_TRACING = false;
private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED";
private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true;
private static final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED";
private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true;
private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT";
private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1;
private static final String MAX_TRACE_MESSAGE_LENGTH = "COSMOS.MAX_TRACE_MESSAGE_LENGTH";
private static final int DEFAULT_MAX_TRACE_MESSAGE_LENGTH = 32 * 1024;
private static final int MIN_MAX_TRACE_MESSAGE_LENGTH = 8 * 1024;
private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY";
private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt();
private static final String OPEN_CONNECTIONS_CONCURRENCY = "COSMOS.OPEN_CONNECTIONS_CONCURRENCY";
private static final int DEFAULT_OPEN_CONNECTIONS_CONCURRENCY = 1;
public static final String MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = "COSMOS.MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED";
private static final int DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final int MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final String TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS = "COSMOS.TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS";
public static final String DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = "COSMOS.DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR";
public static final boolean DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = true;
public Configs() {
this.sslContext = sslContextInit();
}
public static int getCPUCnt() {
return CPU_CNT;
}
private SslContext sslContextInit() {
try {
SslProvider sslProvider = SslContext.defaultClientProvider();
return SslContextBuilder.forClient().sslProvider(sslProvider).build();
} catch (SSLException sslException) {
logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException);
throw new IllegalStateException(sslException);
}
}
public SslContext getSslContext() {
return this.sslContext;
}
public Protocol getProtocol() {
String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull(
emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)),
DEFAULT_PROTOCOL.name()));
try {
return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT));
} catch (Exception e) {
logger.error("Parsing protocol {} failed. Using the default {}.", protocol, DEFAULT_PROTOCOL, e);
return DEFAULT_PROTOCOL;
}
}
public int getMaxNumberOfReadBarrierReadRetries() {
return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES;
}
public int getMaxNumberOfPrimaryReadRetries() {
return MAX_NUMBER_OF_PRIMARY_READ_RETRIES;
}
public int getMaxNumberOfReadQuorumRetries() {
return MAX_NUMBER_OF_READ_QUORUM_RETRIES;
}
public int getDelayBetweenReadBarrierCallsInMs() {
return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS;
}
public int getMaxBarrierRetriesForMultiRegion() {
return MAX_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getBarrierRetryIntervalInMsForMultiRegion() {
return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getMaxShortBarrierRetriesForMultiRegion() {
return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getShortBarrierRetryIntervalInMsForMultiRegion() {
return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getDirectHttpsMaxConnectionLimit() {
return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE);
}
public int getMaxHttpHeaderSize() {
return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE);
}
public int getMaxHttpInitialLineLength() {
return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH);
}
public int getMaxHttpChunkSize() {
return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES);
}
public int getMaxHttpBodyLength() {
return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES);
}
public int getUnavailableLocationsExpirationTimeInSeconds() {
return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS);
}
public static int getClientTelemetrySchedulingInSec() {
return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS);
}
public int getGlobalEndpointManagerMaxInitializationTimeInSeconds() {
return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS);
}
public String getReactorNettyConnectionPoolName() {
return REACTOR_NETTY_CONNECTION_POOL_NAME;
}
public Duration getMaxIdleConnectionTimeout() {
return MAX_IDLE_CONNECTION_TIMEOUT;
}
public Duration getConnectionAcquireTimeout() {
return CONNECTION_ACQUIRE_TIMEOUT;
}
public static int getHttpResponseTimeoutInSeconds() {
return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getQueryPlanResponseTimeoutInSeconds() {
return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static String getClientTelemetryEndpoint() {
return System.getProperty(CLIENT_TELEMETRY_ENDPOINT);
}
public static String getClientTelemetryProxyOptionsConfig() {
return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG);
}
public static String getNonIdempotentWriteRetryPolicy() {
String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return valueFromSystemProperty;
}
return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE);
}
public static int getDefaultHttpPoolSize() {
String valueFromSystemProperty = System.getProperty(HTTP_DEFAULT_CONNECTION_POOL_SIZE);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE;
}
public static boolean isDefaultE2ETimeoutDisabledForNonPointOperations() {
String valueFromSystemProperty = System.getProperty(DEFAULT_E2E_FOR_NON_POINT_DISABLED);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Boolean.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Boolean.valueOf(valueFromEnvVariable);
}
return DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT;
}
public static int getMaxHttpRequestTimeout() {
String valueFromSystemProperty = System.getProperty(HTTP_MAX_REQUEST_TIMEOUT);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_MAX_REQUEST_TIMEOUT_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_MAX_REQUEST_TIMEOUT;
}
public static String getEnvironmentName() {
return System.getProperty(ENVIRONMENT_NAME);
}
public static boolean isQueryPlanCachingEnabled() {
return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true);
}
public static int getAddressRefreshResponseTimeoutInSeconds() {
return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getSessionTokenMismatchDefaultWaitTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchInitialBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchMaximumBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSpeculationType() {
return getJVMConfigAsInt(SPECULATION_TYPE, 0);
}
public static int speculationThreshold() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500);
}
public static int speculationThresholdStep() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100);
}
public static boolean shouldSwitchOffIOThreadForResponse() {
return getJVMConfigAsBoolean(
SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME,
DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE);
}
public static boolean isEmptyPageDiagnosticsEnabled() {
return getJVMConfigAsBoolean(
QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED,
DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED);
}
public static boolean useLegacyTracing() {
return getJVMConfigAsBoolean(
USE_LEGACY_TRACING,
DEFAULT_USE_LEGACY_TRACING);
}
private static int getJVMConfigAsInt(String propName, int defaultValue) {
String propValue = System.getProperty(propName);
return getIntValue(propValue, defaultValue);
}
private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) {
String propValue = System.getProperty(propName);
return getBooleanValue(propValue, defaultValue);
}
private static int getIntValue(String val, int defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Integer.valueOf(val);
}
}
private static boolean getBooleanValue(String val, boolean defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Boolean.valueOf(val);
}
}
public static boolean isReplicaAddressValidationEnabled() {
return getJVMConfigAsBoolean(
REPLICA_ADDRESS_VALIDATION_ENABLED,
DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED);
}
public static boolean isTcpHealthCheckTimeoutDetectionEnabled() {
return getJVMConfigAsBoolean(
TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED,
DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED);
}
public static int getMinConnectionPoolSizePerEndpoint() {
return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT);
}
public static int getOpenConnectionsConcurrency() {
return getIntValue(System.getProperty(OPEN_CONNECTIONS_CONCURRENCY), DEFAULT_OPEN_CONNECTIONS_CONCURRENCY);
}
public static int getAggressiveWarmupConcurrency() {
return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY);
}
public static int getMaxRetriesInLocalRegionWhenRemoteRegionPreferred() {
return
Math.max(
getIntValue(
System.getProperty(MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED);
}
public static Duration getMinRetryTimeInLocalRegionWhenRemoteRegionPreferred() {
return
Duration.ofMillis(Math.max(
getIntValue(
System.getProperty(DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME),
DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS),
MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS));
}
public static int getMaxTraceMessageLength() {
return
Math.max(
getIntValue(
System.getProperty(MAX_TRACE_MESSAGE_LENGTH),
DEFAULT_MAX_TRACE_MESSAGE_LENGTH),
MIN_MAX_TRACE_MESSAGE_LENGTH);
}
public static Duration getTcpConnectionAcquisitionTimeout(int defaultValueInMs) {
return Duration.ofMillis(
getIntValue(
System.getProperty(TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS),
defaultValueInMs
)
);
}
} | class Configs {
private static final Logger logger = LoggerFactory.getLogger(Configs.class);
/**
* Integer value specifying the speculation type
* <pre>
* 0 - No speculation
* 1 - Threshold based speculation
* </pre>
*/
public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE";
public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD";
public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP";
private final SslContext sslContext;
private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL";
private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol";
private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP;
private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS";
private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS";
private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES";
private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES";
private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES";
private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES";
private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT";
private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS";
public static final int DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE = 1000;
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE = "COSMOS.DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE = "COSMOS_DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final boolean DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT = false;
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED = "COSMOS.E2E_FOR_NON_POINT_DISABLED";
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE = "COSMOS_E2E_FOR_NON_POINT_DISABLED";
public static final int DEFAULT_HTTP_MAX_REQUEST_TIMEOUT = 60;
public static final String HTTP_MAX_REQUEST_TIMEOUT = "COSMOS.HTTP_MAX_REQUEST_TIMEOUT";
public static final String HTTP_MAX_REQUEST_TIMEOUT_VARIABLE = "COSMOS_HTTP_MAX_REQUEST_TIMEOUT";
private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS";
private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY";
private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG";
private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS";
private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT";
private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME";
private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED";
private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60;
private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60;
private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024;
private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096;
private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192;
private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024;
private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6;
private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5;
private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30;
private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30;
private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4;
private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10;
private static final int CPU_CNT = Runtime.getRuntime().availableProcessors();
private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500;
private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60;
private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final Duration CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45);
private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool";
private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60;
private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5;
private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 500;
public static final int MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 100;
private static final String DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_IN_REGION-RETRY_TIME_IN_MILLISECONDS";
private static final int DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 500;
private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE";
private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false;
private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED";
private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false;
private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING";
private static final boolean DEFAULT_USE_LEGACY_TRACING = false;
private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED";
private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true;
private static final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED";
private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true;
private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT";
private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1;
private static final String MAX_TRACE_MESSAGE_LENGTH = "COSMOS.MAX_TRACE_MESSAGE_LENGTH";
private static final int DEFAULT_MAX_TRACE_MESSAGE_LENGTH = 32 * 1024;
private static final int MIN_MAX_TRACE_MESSAGE_LENGTH = 8 * 1024;
private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY";
private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt();
private static final String OPEN_CONNECTIONS_CONCURRENCY = "COSMOS.OPEN_CONNECTIONS_CONCURRENCY";
private static final int DEFAULT_OPEN_CONNECTIONS_CONCURRENCY = 1;
public static final String MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = "COSMOS.MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED";
private static final int DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final int MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final String TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS = "COSMOS.TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS";
public static final String DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = "COSMOS.DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR";
public static final boolean DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = true;
public Configs() {
this.sslContext = sslContextInit();
}
public static int getCPUCnt() {
return CPU_CNT;
}
private SslContext sslContextInit() {
try {
SslProvider sslProvider = SslContext.defaultClientProvider();
return SslContextBuilder.forClient().sslProvider(sslProvider).build();
} catch (SSLException sslException) {
logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException);
throw new IllegalStateException(sslException);
}
}
public SslContext getSslContext() {
return this.sslContext;
}
public Protocol getProtocol() {
String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull(
emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)),
DEFAULT_PROTOCOL.name()));
try {
return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT));
} catch (Exception e) {
logger.error("Parsing protocol {} failed. Using the default {}.", protocol, DEFAULT_PROTOCOL, e);
return DEFAULT_PROTOCOL;
}
}
public int getMaxNumberOfReadBarrierReadRetries() {
return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES;
}
public int getMaxNumberOfPrimaryReadRetries() {
return MAX_NUMBER_OF_PRIMARY_READ_RETRIES;
}
public int getMaxNumberOfReadQuorumRetries() {
return MAX_NUMBER_OF_READ_QUORUM_RETRIES;
}
public int getDelayBetweenReadBarrierCallsInMs() {
return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS;
}
public int getMaxBarrierRetriesForMultiRegion() {
return MAX_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getBarrierRetryIntervalInMsForMultiRegion() {
return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getMaxShortBarrierRetriesForMultiRegion() {
return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getShortBarrierRetryIntervalInMsForMultiRegion() {
return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getDirectHttpsMaxConnectionLimit() {
return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE);
}
public int getMaxHttpHeaderSize() {
return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE);
}
public int getMaxHttpInitialLineLength() {
return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH);
}
public int getMaxHttpChunkSize() {
return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES);
}
public int getMaxHttpBodyLength() {
return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES);
}
public int getUnavailableLocationsExpirationTimeInSeconds() {
return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS);
}
public static int getClientTelemetrySchedulingInSec() {
return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS);
}
public int getGlobalEndpointManagerMaxInitializationTimeInSeconds() {
return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS);
}
public String getReactorNettyConnectionPoolName() {
return REACTOR_NETTY_CONNECTION_POOL_NAME;
}
public Duration getMaxIdleConnectionTimeout() {
return MAX_IDLE_CONNECTION_TIMEOUT;
}
public Duration getConnectionAcquireTimeout() {
return CONNECTION_ACQUIRE_TIMEOUT;
}
public static int getHttpResponseTimeoutInSeconds() {
return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getQueryPlanResponseTimeoutInSeconds() {
return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static String getClientTelemetryEndpoint() {
return System.getProperty(CLIENT_TELEMETRY_ENDPOINT);
}
public static String getClientTelemetryProxyOptionsConfig() {
return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG);
}
public static String getNonIdempotentWriteRetryPolicy() {
String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return valueFromSystemProperty;
}
return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE);
}
public static int getDefaultHttpPoolSize() {
String valueFromSystemProperty = System.getProperty(HTTP_DEFAULT_CONNECTION_POOL_SIZE);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE;
}
public static boolean isDefaultE2ETimeoutDisabledForNonPointOperations() {
String valueFromSystemProperty = System.getProperty(DEFAULT_E2E_FOR_NON_POINT_DISABLED);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Boolean.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Boolean.valueOf(valueFromEnvVariable);
}
return DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT;
}
public static int getMaxHttpRequestTimeout() {
String valueFromSystemProperty = System.getProperty(HTTP_MAX_REQUEST_TIMEOUT);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_MAX_REQUEST_TIMEOUT_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_MAX_REQUEST_TIMEOUT;
}
public static String getEnvironmentName() {
return System.getProperty(ENVIRONMENT_NAME);
}
public static boolean isQueryPlanCachingEnabled() {
return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true);
}
public static int getAddressRefreshResponseTimeoutInSeconds() {
return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getSessionTokenMismatchDefaultWaitTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchInitialBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchMaximumBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSpeculationType() {
return getJVMConfigAsInt(SPECULATION_TYPE, 0);
}
public static int speculationThreshold() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500);
}
public static int speculationThresholdStep() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100);
}
public static boolean shouldSwitchOffIOThreadForResponse() {
return getJVMConfigAsBoolean(
SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME,
DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE);
}
public static boolean isEmptyPageDiagnosticsEnabled() {
return getJVMConfigAsBoolean(
QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED,
DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED);
}
public static boolean useLegacyTracing() {
return getJVMConfigAsBoolean(
USE_LEGACY_TRACING,
DEFAULT_USE_LEGACY_TRACING);
}
private static int getJVMConfigAsInt(String propName, int defaultValue) {
String propValue = System.getProperty(propName);
return getIntValue(propValue, defaultValue);
}
private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) {
String propValue = System.getProperty(propName);
return getBooleanValue(propValue, defaultValue);
}
private static int getIntValue(String val, int defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Integer.valueOf(val);
}
}
private static boolean getBooleanValue(String val, boolean defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Boolean.valueOf(val);
}
}
public static boolean isReplicaAddressValidationEnabled() {
return getJVMConfigAsBoolean(
REPLICA_ADDRESS_VALIDATION_ENABLED,
DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED);
}
public static boolean isTcpHealthCheckTimeoutDetectionEnabled() {
return getJVMConfigAsBoolean(
TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED,
DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED);
}
public static int getMinConnectionPoolSizePerEndpoint() {
return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT);
}
public static int getOpenConnectionsConcurrency() {
return getIntValue(System.getProperty(OPEN_CONNECTIONS_CONCURRENCY), DEFAULT_OPEN_CONNECTIONS_CONCURRENCY);
}
public static int getAggressiveWarmupConcurrency() {
return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY);
}
public static int getMaxRetriesInLocalRegionWhenRemoteRegionPreferred() {
return
Math.max(
getIntValue(
System.getProperty(MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED);
}
public static Duration getMinRetryTimeInLocalRegionWhenRemoteRegionPreferred() {
return
Duration.ofMillis(Math.max(
getIntValue(
System.getProperty(DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME),
DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS),
MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS));
}
public static int getMaxTraceMessageLength() {
return
Math.max(
getIntValue(
System.getProperty(MAX_TRACE_MESSAGE_LENGTH),
DEFAULT_MAX_TRACE_MESSAGE_LENGTH),
MIN_MAX_TRACE_MESSAGE_LENGTH);
}
public static Duration getTcpConnectionAcquisitionTimeout(int defaultValueInMs) {
return Duration.ofMillis(
getIntValue(
System.getProperty(TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS),
defaultValueInMs
)
);
}
} |
Please increment a static counter (AtomicLong here) and include that in CosmosDiagnostics - that way we know how many fatal errors have been mapped back to exceptions and can use it as a signal when debugging | public Exception mapFatalError(Error error) {
if (error == null) {
return null;
}
if (this.fatalErrorMapper.get() != null) {
Exception mappedException = this.fatalErrorMapper.get().apply(error);
LOGGER.info("Mapping from Error {} to Exception {}", error.getClass(), mappedException.getClass());
return mappedException;
}
return null;
} | return mappedException; | public Exception mapFatalError(Error error) {
if (error == null || this.fatalErrorMapper.get() == null) {
return null;
}
return this.mapToException(error);
} | class DiagnosticsProviderJvmFatalErrorMapper {
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProviderJvmFatalErrorMapper.class);
private static DiagnosticsProviderJvmFatalErrorMapper diagnosticsProviderJvmFatalErrorMapper =
new DiagnosticsProviderJvmFatalErrorMapper();
private final AtomicReference<Function<Error, Exception>> fatalErrorMapper;
public DiagnosticsProviderJvmFatalErrorMapper() {
this.fatalErrorMapper = new AtomicReference<>();
}
public void registerFatalErrorMapper(Function<Error, Exception> fatalErrorMapper) {
LOGGER.info("Register diagnostics provider fatal error mapper");
this.fatalErrorMapper.set(fatalErrorMapper);
}
public static DiagnosticsProviderJvmFatalErrorMapper getMapper() {
return diagnosticsProviderJvmFatalErrorMapper;
}
} | class DiagnosticsProviderJvmFatalErrorMapper {
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProviderJvmFatalErrorMapper.class);
private static DiagnosticsProviderJvmFatalErrorMapper diagnosticsProviderJvmFatalErrorMapper =
new DiagnosticsProviderJvmFatalErrorMapper();
private final AtomicReference<Function<Error, Exception>> fatalErrorMapper;
private final AtomicLong mapperExecutionCount;
public DiagnosticsProviderJvmFatalErrorMapper() {
this.fatalErrorMapper = new AtomicReference<>();
this.mapperExecutionCount = new AtomicLong(0);
}
public void registerFatalErrorMapper(Function<Error, Exception> fatalErrorMapper) {
LOGGER.info("Register diagnostics provider fatal error mapper");
this.fatalErrorMapper.set(fatalErrorMapper);
}
private Exception mapToException(Error error) {
try {
this.mapperExecutionCount.getAndIncrement();
Exception mappedException = this.fatalErrorMapper.get().apply(error);
if (mappedException != null) {
LOGGER.info("Mapping from Error {} to Exception {}", error.getClass(), mappedException.getClass());
return mappedException;
} else {
LOGGER.info("Mapped exception being null.");
}
} catch (Exception mapException) {
LOGGER.error("Map fatal error failed. ", mapException);
}
return null;
}
public static DiagnosticsProviderJvmFatalErrorMapper getMapper() {
return diagnosticsProviderJvmFatalErrorMapper;
}
public long getMapperExecutionCount() {
return mapperExecutionCount.get();
}
} |
Updated | public static boolean shouldDiagnosticsProviderSystemExitOnError() {
return getJVMConfigAsBoolean(
DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR,
DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR);
} | return getJVMConfigAsBoolean( | public static boolean shouldDiagnosticsProviderSystemExitOnError() {
String shouldSystemExit =
System.getProperty(
DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR,
firstNonNull(
emptyToNull(System.getenv().get(DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR)),
String.valueOf(DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR)));
return Boolean.parseBoolean(shouldSystemExit);
} | class Configs {
private static final Logger logger = LoggerFactory.getLogger(Configs.class);
/**
* Integer value specifying the speculation type
* <pre>
* 0 - No speculation
* 1 - Threshold based speculation
* </pre>
*/
public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE";
public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD";
public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP";
private final SslContext sslContext;
private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL";
private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol";
private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP;
private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS";
private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS";
private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES";
private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES";
private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES";
private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES";
private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT";
private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS";
public static final int DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE = 1000;
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE = "COSMOS.DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE = "COSMOS_DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final boolean DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT = false;
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED = "COSMOS.E2E_FOR_NON_POINT_DISABLED";
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE = "COSMOS_E2E_FOR_NON_POINT_DISABLED";
public static final int DEFAULT_HTTP_MAX_REQUEST_TIMEOUT = 60;
public static final String HTTP_MAX_REQUEST_TIMEOUT = "COSMOS.HTTP_MAX_REQUEST_TIMEOUT";
public static final String HTTP_MAX_REQUEST_TIMEOUT_VARIABLE = "COSMOS_HTTP_MAX_REQUEST_TIMEOUT";
private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS";
private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY";
private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG";
private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS";
private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT";
private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME";
private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED";
private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60;
private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60;
private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024;
private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096;
private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192;
private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024;
private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6;
private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5;
private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30;
private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30;
private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4;
private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10;
private static final int CPU_CNT = Runtime.getRuntime().availableProcessors();
private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500;
private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60;
private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final Duration CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45);
private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool";
private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60;
private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5;
private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 500;
public static final int MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 100;
private static final String DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_IN_REGION-RETRY_TIME_IN_MILLISECONDS";
private static final int DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 500;
private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE";
private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false;
private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED";
private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false;
private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING";
private static final boolean DEFAULT_USE_LEGACY_TRACING = false;
private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED";
private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true;
private static final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED";
private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true;
private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT";
private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1;
private static final String MAX_TRACE_MESSAGE_LENGTH = "COSMOS.MAX_TRACE_MESSAGE_LENGTH";
private static final int DEFAULT_MAX_TRACE_MESSAGE_LENGTH = 32 * 1024;
private static final int MIN_MAX_TRACE_MESSAGE_LENGTH = 8 * 1024;
private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY";
private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt();
private static final String OPEN_CONNECTIONS_CONCURRENCY = "COSMOS.OPEN_CONNECTIONS_CONCURRENCY";
private static final int DEFAULT_OPEN_CONNECTIONS_CONCURRENCY = 1;
public static final String MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = "COSMOS.MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED";
private static final int DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final int MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final String TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS = "COSMOS.TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS";
public static final String DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = "COSMOS.DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR";
public static final boolean DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = true;
public Configs() {
this.sslContext = sslContextInit();
}
public static int getCPUCnt() {
return CPU_CNT;
}
private SslContext sslContextInit() {
try {
SslProvider sslProvider = SslContext.defaultClientProvider();
return SslContextBuilder.forClient().sslProvider(sslProvider).build();
} catch (SSLException sslException) {
logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException);
throw new IllegalStateException(sslException);
}
}
public SslContext getSslContext() {
return this.sslContext;
}
public Protocol getProtocol() {
String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull(
emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)),
DEFAULT_PROTOCOL.name()));
try {
return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT));
} catch (Exception e) {
logger.error("Parsing protocol {} failed. Using the default {}.", protocol, DEFAULT_PROTOCOL, e);
return DEFAULT_PROTOCOL;
}
}
public int getMaxNumberOfReadBarrierReadRetries() {
return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES;
}
public int getMaxNumberOfPrimaryReadRetries() {
return MAX_NUMBER_OF_PRIMARY_READ_RETRIES;
}
public int getMaxNumberOfReadQuorumRetries() {
return MAX_NUMBER_OF_READ_QUORUM_RETRIES;
}
public int getDelayBetweenReadBarrierCallsInMs() {
return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS;
}
public int getMaxBarrierRetriesForMultiRegion() {
return MAX_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getBarrierRetryIntervalInMsForMultiRegion() {
return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getMaxShortBarrierRetriesForMultiRegion() {
return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getShortBarrierRetryIntervalInMsForMultiRegion() {
return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getDirectHttpsMaxConnectionLimit() {
return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE);
}
public int getMaxHttpHeaderSize() {
return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE);
}
public int getMaxHttpInitialLineLength() {
return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH);
}
public int getMaxHttpChunkSize() {
return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES);
}
public int getMaxHttpBodyLength() {
return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES);
}
public int getUnavailableLocationsExpirationTimeInSeconds() {
return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS);
}
public static int getClientTelemetrySchedulingInSec() {
return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS);
}
public int getGlobalEndpointManagerMaxInitializationTimeInSeconds() {
return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS);
}
public String getReactorNettyConnectionPoolName() {
return REACTOR_NETTY_CONNECTION_POOL_NAME;
}
public Duration getMaxIdleConnectionTimeout() {
return MAX_IDLE_CONNECTION_TIMEOUT;
}
public Duration getConnectionAcquireTimeout() {
return CONNECTION_ACQUIRE_TIMEOUT;
}
public static int getHttpResponseTimeoutInSeconds() {
return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getQueryPlanResponseTimeoutInSeconds() {
return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static String getClientTelemetryEndpoint() {
return System.getProperty(CLIENT_TELEMETRY_ENDPOINT);
}
public static String getClientTelemetryProxyOptionsConfig() {
return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG);
}
public static String getNonIdempotentWriteRetryPolicy() {
String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return valueFromSystemProperty;
}
return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE);
}
public static int getDefaultHttpPoolSize() {
String valueFromSystemProperty = System.getProperty(HTTP_DEFAULT_CONNECTION_POOL_SIZE);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE;
}
public static boolean isDefaultE2ETimeoutDisabledForNonPointOperations() {
String valueFromSystemProperty = System.getProperty(DEFAULT_E2E_FOR_NON_POINT_DISABLED);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Boolean.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Boolean.valueOf(valueFromEnvVariable);
}
return DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT;
}
public static int getMaxHttpRequestTimeout() {
String valueFromSystemProperty = System.getProperty(HTTP_MAX_REQUEST_TIMEOUT);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_MAX_REQUEST_TIMEOUT_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_MAX_REQUEST_TIMEOUT;
}
public static String getEnvironmentName() {
return System.getProperty(ENVIRONMENT_NAME);
}
public static boolean isQueryPlanCachingEnabled() {
return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true);
}
public static int getAddressRefreshResponseTimeoutInSeconds() {
return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getSessionTokenMismatchDefaultWaitTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchInitialBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchMaximumBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSpeculationType() {
return getJVMConfigAsInt(SPECULATION_TYPE, 0);
}
public static int speculationThreshold() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500);
}
public static int speculationThresholdStep() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100);
}
public static boolean shouldSwitchOffIOThreadForResponse() {
return getJVMConfigAsBoolean(
SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME,
DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE);
}
public static boolean isEmptyPageDiagnosticsEnabled() {
return getJVMConfigAsBoolean(
QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED,
DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED);
}
public static boolean useLegacyTracing() {
return getJVMConfigAsBoolean(
USE_LEGACY_TRACING,
DEFAULT_USE_LEGACY_TRACING);
}
private static int getJVMConfigAsInt(String propName, int defaultValue) {
String propValue = System.getProperty(propName);
return getIntValue(propValue, defaultValue);
}
private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) {
String propValue = System.getProperty(propName);
return getBooleanValue(propValue, defaultValue);
}
private static int getIntValue(String val, int defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Integer.valueOf(val);
}
}
private static boolean getBooleanValue(String val, boolean defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Boolean.valueOf(val);
}
}
public static boolean isReplicaAddressValidationEnabled() {
return getJVMConfigAsBoolean(
REPLICA_ADDRESS_VALIDATION_ENABLED,
DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED);
}
public static boolean isTcpHealthCheckTimeoutDetectionEnabled() {
return getJVMConfigAsBoolean(
TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED,
DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED);
}
public static int getMinConnectionPoolSizePerEndpoint() {
return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT);
}
public static int getOpenConnectionsConcurrency() {
return getIntValue(System.getProperty(OPEN_CONNECTIONS_CONCURRENCY), DEFAULT_OPEN_CONNECTIONS_CONCURRENCY);
}
public static int getAggressiveWarmupConcurrency() {
return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY);
}
public static int getMaxRetriesInLocalRegionWhenRemoteRegionPreferred() {
return
Math.max(
getIntValue(
System.getProperty(MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED);
}
public static Duration getMinRetryTimeInLocalRegionWhenRemoteRegionPreferred() {
return
Duration.ofMillis(Math.max(
getIntValue(
System.getProperty(DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME),
DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS),
MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS));
}
public static int getMaxTraceMessageLength() {
return
Math.max(
getIntValue(
System.getProperty(MAX_TRACE_MESSAGE_LENGTH),
DEFAULT_MAX_TRACE_MESSAGE_LENGTH),
MIN_MAX_TRACE_MESSAGE_LENGTH);
}
public static Duration getTcpConnectionAcquisitionTimeout(int defaultValueInMs) {
return Duration.ofMillis(
getIntValue(
System.getProperty(TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS),
defaultValueInMs
)
);
}
} | class Configs {
private static final Logger logger = LoggerFactory.getLogger(Configs.class);
/**
* Integer value specifying the speculation type
* <pre>
* 0 - No speculation
* 1 - Threshold based speculation
* </pre>
*/
public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE";
public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD";
public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP";
private final SslContext sslContext;
private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL";
private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol";
private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP;
private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS";
private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS";
private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES";
private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES";
private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES";
private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES";
private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT";
private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS";
public static final int DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE = 1000;
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE = "COSMOS.DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final String HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE = "COSMOS_DEFAULT_HTTP_CONNECTION_POOL_SIZE";
public static final boolean DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT = false;
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED = "COSMOS.E2E_FOR_NON_POINT_DISABLED";
public static final String DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE = "COSMOS_E2E_FOR_NON_POINT_DISABLED";
public static final int DEFAULT_HTTP_MAX_REQUEST_TIMEOUT = 60;
public static final String HTTP_MAX_REQUEST_TIMEOUT = "COSMOS.HTTP_MAX_REQUEST_TIMEOUT";
public static final String HTTP_MAX_REQUEST_TIMEOUT_VARIABLE = "COSMOS_HTTP_MAX_REQUEST_TIMEOUT";
private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS";
private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY";
public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY";
private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG";
private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS";
private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT";
private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME";
private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED";
private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60;
private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60;
private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024;
private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096;
private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192;
private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024;
private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6;
private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6;
private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5;
private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30;
private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30;
private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4;
private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10;
private static final int CPU_CNT = Runtime.getRuntime().availableProcessors();
private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500;
private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60;
private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60);
private static final Duration CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45);
private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool";
private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60;
private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5;
private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5;
public static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS";
private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 500;
public static final int MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 100;
private static final String DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME =
"COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_IN_REGION-RETRY_TIME_IN_MILLISECONDS";
private static final int DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS = 500;
private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE";
private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false;
private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED";
private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false;
private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING";
private static final boolean DEFAULT_USE_LEGACY_TRACING = false;
private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED";
private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true;
private static final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED";
private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true;
private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT";
private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1;
private static final String MAX_TRACE_MESSAGE_LENGTH = "COSMOS.MAX_TRACE_MESSAGE_LENGTH";
private static final int DEFAULT_MAX_TRACE_MESSAGE_LENGTH = 32 * 1024;
private static final int MIN_MAX_TRACE_MESSAGE_LENGTH = 8 * 1024;
private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY";
private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt();
private static final String OPEN_CONNECTIONS_CONCURRENCY = "COSMOS.OPEN_CONNECTIONS_CONCURRENCY";
private static final int DEFAULT_OPEN_CONNECTIONS_CONCURRENCY = 1;
public static final String MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = "COSMOS.MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED";
private static final int DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final int MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED = 1;
public static final String TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS = "COSMOS.TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS";
public static final String DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = "COSMOS.DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR";
public static final boolean DEFAULT_DIAGNOSTICS_PROVIDER_SYSTEM_EXIT_ON_ERROR = true;
public Configs() {
this.sslContext = sslContextInit();
}
public static int getCPUCnt() {
return CPU_CNT;
}
private SslContext sslContextInit() {
try {
SslProvider sslProvider = SslContext.defaultClientProvider();
return SslContextBuilder.forClient().sslProvider(sslProvider).build();
} catch (SSLException sslException) {
logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException);
throw new IllegalStateException(sslException);
}
}
public SslContext getSslContext() {
return this.sslContext;
}
public Protocol getProtocol() {
String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull(
emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)),
DEFAULT_PROTOCOL.name()));
try {
return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT));
} catch (Exception e) {
logger.error("Parsing protocol {} failed. Using the default {}.", protocol, DEFAULT_PROTOCOL, e);
return DEFAULT_PROTOCOL;
}
}
public int getMaxNumberOfReadBarrierReadRetries() {
return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES;
}
public int getMaxNumberOfPrimaryReadRetries() {
return MAX_NUMBER_OF_PRIMARY_READ_RETRIES;
}
public int getMaxNumberOfReadQuorumRetries() {
return MAX_NUMBER_OF_READ_QUORUM_RETRIES;
}
public int getDelayBetweenReadBarrierCallsInMs() {
return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS;
}
public int getMaxBarrierRetriesForMultiRegion() {
return MAX_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getBarrierRetryIntervalInMsForMultiRegion() {
return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getMaxShortBarrierRetriesForMultiRegion() {
return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION;
}
public int getShortBarrierRetryIntervalInMsForMultiRegion() {
return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION;
}
public int getDirectHttpsMaxConnectionLimit() {
return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE);
}
public int getMaxHttpHeaderSize() {
return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE);
}
public int getMaxHttpInitialLineLength() {
return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH);
}
public int getMaxHttpChunkSize() {
return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES);
}
public int getMaxHttpBodyLength() {
return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES);
}
public int getUnavailableLocationsExpirationTimeInSeconds() {
return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS);
}
public static int getClientTelemetrySchedulingInSec() {
return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS);
}
public int getGlobalEndpointManagerMaxInitializationTimeInSeconds() {
return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS);
}
public String getReactorNettyConnectionPoolName() {
return REACTOR_NETTY_CONNECTION_POOL_NAME;
}
public Duration getMaxIdleConnectionTimeout() {
return MAX_IDLE_CONNECTION_TIMEOUT;
}
public Duration getConnectionAcquireTimeout() {
return CONNECTION_ACQUIRE_TIMEOUT;
}
public static int getHttpResponseTimeoutInSeconds() {
return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getQueryPlanResponseTimeoutInSeconds() {
return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static String getClientTelemetryEndpoint() {
return System.getProperty(CLIENT_TELEMETRY_ENDPOINT);
}
public static String getClientTelemetryProxyOptionsConfig() {
return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG);
}
public static String getNonIdempotentWriteRetryPolicy() {
String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return valueFromSystemProperty;
}
return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE);
}
public static int getDefaultHttpPoolSize() {
String valueFromSystemProperty = System.getProperty(HTTP_DEFAULT_CONNECTION_POOL_SIZE);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_DEFAULT_CONNECTION_POOL_SIZE_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_DEFAULT_CONNECTION_POOL_SIZE;
}
public static boolean isDefaultE2ETimeoutDisabledForNonPointOperations() {
String valueFromSystemProperty = System.getProperty(DEFAULT_E2E_FOR_NON_POINT_DISABLED);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Boolean.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(DEFAULT_E2E_FOR_NON_POINT_DISABLED_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Boolean.valueOf(valueFromEnvVariable);
}
return DEFAULT_E2E_FOR_NON_POINT_DISABLED_DEFAULT;
}
public static int getMaxHttpRequestTimeout() {
String valueFromSystemProperty = System.getProperty(HTTP_MAX_REQUEST_TIMEOUT);
if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) {
return Integer.valueOf(valueFromSystemProperty);
}
String valueFromEnvVariable = System.getenv(HTTP_MAX_REQUEST_TIMEOUT_VARIABLE);
if (valueFromEnvVariable != null && !valueFromEnvVariable.isEmpty()) {
return Integer.valueOf(valueFromEnvVariable);
}
return DEFAULT_HTTP_MAX_REQUEST_TIMEOUT;
}
public static String getEnvironmentName() {
return System.getProperty(ENVIRONMENT_NAME);
}
public static boolean isQueryPlanCachingEnabled() {
return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true);
}
public static int getAddressRefreshResponseTimeoutInSeconds() {
return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS);
}
public static int getSessionTokenMismatchDefaultWaitTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchInitialBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSessionTokenMismatchMaximumBackoffTimeInMs() {
return getJVMConfigAsInt(
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME,
DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS);
}
public static int getSpeculationType() {
return getJVMConfigAsInt(SPECULATION_TYPE, 0);
}
public static int speculationThreshold() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500);
}
public static int speculationThresholdStep() {
return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100);
}
public static boolean shouldSwitchOffIOThreadForResponse() {
return getJVMConfigAsBoolean(
SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME,
DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE);
}
public static boolean isEmptyPageDiagnosticsEnabled() {
return getJVMConfigAsBoolean(
QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED,
DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED);
}
public static boolean useLegacyTracing() {
return getJVMConfigAsBoolean(
USE_LEGACY_TRACING,
DEFAULT_USE_LEGACY_TRACING);
}
private static int getJVMConfigAsInt(String propName, int defaultValue) {
String propValue = System.getProperty(propName);
return getIntValue(propValue, defaultValue);
}
private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) {
String propValue = System.getProperty(propName);
return getBooleanValue(propValue, defaultValue);
}
private static int getIntValue(String val, int defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Integer.valueOf(val);
}
}
private static boolean getBooleanValue(String val, boolean defaultValue) {
if (StringUtils.isEmpty(val)) {
return defaultValue;
} else {
return Boolean.valueOf(val);
}
}
public static boolean isReplicaAddressValidationEnabled() {
return getJVMConfigAsBoolean(
REPLICA_ADDRESS_VALIDATION_ENABLED,
DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED);
}
public static boolean isTcpHealthCheckTimeoutDetectionEnabled() {
return getJVMConfigAsBoolean(
TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED,
DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED);
}
public static int getMinConnectionPoolSizePerEndpoint() {
return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT);
}
public static int getOpenConnectionsConcurrency() {
return getIntValue(System.getProperty(OPEN_CONNECTIONS_CONCURRENCY), DEFAULT_OPEN_CONNECTIONS_CONCURRENCY);
}
public static int getAggressiveWarmupConcurrency() {
return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY);
}
public static int getMaxRetriesInLocalRegionWhenRemoteRegionPreferred() {
return
Math.max(
getIntValue(
System.getProperty(MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
DEFAULT_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED),
MIN_MAX_RETRIES_IN_LOCAL_REGION_WHEN_REMOTE_REGION_PREFERRED);
}
public static Duration getMinRetryTimeInLocalRegionWhenRemoteRegionPreferred() {
return
Duration.ofMillis(Math.max(
getIntValue(
System.getProperty(DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS_NAME),
DEFAULT_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS),
MIN_MIN_IN_REGION_RETRY_TIME_FOR_WRITES_MS));
}
public static int getMaxTraceMessageLength() {
return
Math.max(
getIntValue(
System.getProperty(MAX_TRACE_MESSAGE_LENGTH),
DEFAULT_MAX_TRACE_MESSAGE_LENGTH),
MIN_MAX_TRACE_MESSAGE_LENGTH);
}
public static Duration getTcpConnectionAcquisitionTimeout(int defaultValueInMs) {
return Duration.ofMillis(
getIntValue(
System.getProperty(TCP_CONNECTION_ACQUISITION_TIMEOUT_IN_MS),
defaultValueInMs
)
);
}
} |
Added JvmFatalErrorMapperExecutionCount in the diagnostics | public Exception mapFatalError(Error error) {
if (error == null) {
return null;
}
if (this.fatalErrorMapper.get() != null) {
Exception mappedException = this.fatalErrorMapper.get().apply(error);
LOGGER.info("Mapping from Error {} to Exception {}", error.getClass(), mappedException.getClass());
return mappedException;
}
return null;
} | return mappedException; | public Exception mapFatalError(Error error) {
if (error == null || this.fatalErrorMapper.get() == null) {
return null;
}
return this.mapToException(error);
} | class DiagnosticsProviderJvmFatalErrorMapper {
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProviderJvmFatalErrorMapper.class);
private static DiagnosticsProviderJvmFatalErrorMapper diagnosticsProviderJvmFatalErrorMapper =
new DiagnosticsProviderJvmFatalErrorMapper();
private final AtomicReference<Function<Error, Exception>> fatalErrorMapper;
public DiagnosticsProviderJvmFatalErrorMapper() {
this.fatalErrorMapper = new AtomicReference<>();
}
public void registerFatalErrorMapper(Function<Error, Exception> fatalErrorMapper) {
LOGGER.info("Register diagnostics provider fatal error mapper");
this.fatalErrorMapper.set(fatalErrorMapper);
}
public static DiagnosticsProviderJvmFatalErrorMapper getMapper() {
return diagnosticsProviderJvmFatalErrorMapper;
}
} | class DiagnosticsProviderJvmFatalErrorMapper {
private static final Logger LOGGER = LoggerFactory.getLogger(DiagnosticsProviderJvmFatalErrorMapper.class);
private static DiagnosticsProviderJvmFatalErrorMapper diagnosticsProviderJvmFatalErrorMapper =
new DiagnosticsProviderJvmFatalErrorMapper();
private final AtomicReference<Function<Error, Exception>> fatalErrorMapper;
private final AtomicLong mapperExecutionCount;
public DiagnosticsProviderJvmFatalErrorMapper() {
this.fatalErrorMapper = new AtomicReference<>();
this.mapperExecutionCount = new AtomicLong(0);
}
public void registerFatalErrorMapper(Function<Error, Exception> fatalErrorMapper) {
LOGGER.info("Register diagnostics provider fatal error mapper");
this.fatalErrorMapper.set(fatalErrorMapper);
}
private Exception mapToException(Error error) {
try {
this.mapperExecutionCount.getAndIncrement();
Exception mappedException = this.fatalErrorMapper.get().apply(error);
if (mappedException != null) {
LOGGER.info("Mapping from Error {} to Exception {}", error.getClass(), mappedException.getClass());
return mappedException;
} else {
LOGGER.info("Mapped exception being null.");
}
} catch (Exception mapException) {
LOGGER.error("Map fatal error failed. ", mapException);
}
return null;
}
public static DiagnosticsProviderJvmFatalErrorMapper getMapper() {
return diagnosticsProviderJvmFatalErrorMapper;
}
public long getMapperExecutionCount() {
return mapperExecutionCount.get();
}
} |
```suggestion return responseHeaders != null && Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM); ``` Let object equals handle nullness checking | private static boolean isTextEventStream(okhttp3.Headers responseHeaders) {
return responseHeaders != null && responseHeaders.get(HeaderName.CONTENT_TYPE.toString()) != null &&
Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM);
} | Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM); | private static boolean isTextEventStream(okhttp3.Headers responseHeaders) {
if (responseHeaders != null) {
return ServerSentEventUtil
.isTextEventStreamContentType(responseHeaders.get(HttpHeaderName.CONTENT_TYPE.toString()));
}
return false;
} | class OkHttpHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
final OkHttpClient httpClient;
OkHttpHttpClient(OkHttpClient httpClient) {
this.httpClient = httpClient;
}
@Override
public Response<?> send(HttpRequest request) {
boolean eagerlyConvertHeaders = request.getMetadata().isEagerlyConvertHeaders();
boolean eagerlyReadResponse = request.getMetadata().isEagerlyReadResponse();
boolean ignoreResponseBody = request.getMetadata().isIgnoreResponseBody();
Request okHttpRequest = toOkHttpRequest(request);
try {
okhttp3.Response okHttpResponse = httpClient.newCall(okHttpRequest).execute();
return toResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Converts the given generic-core request to okhttp request.
*
* @param request the generic-core request.
*
* @return Th eOkHttp request.
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request) {
Request.Builder requestBuilder = new Request.Builder()
.url(request.getUrl());
if (request.getHeaders() != null) {
for (Header hdr : request.getHeaders()) {
hdr.getValues().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBody(), request.getHeaders());
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody).build();
}
/**
* Create a Mono of okhttp3.RequestBody from the given BinaryData.
*
* @param bodyContent The request body content
* @param headers the headers associated with the original request
*
* @return The Mono emitting okhttp request
*/
private RequestBody toOkHttpRequestBody(BinaryData bodyContent, Headers headers) {
if (bodyContent == null) {
return EMPTY_REQUEST_BODY;
}
String contentType = headers.getValue(HeaderName.CONTENT_TYPE);
MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
if (bodyContent instanceof InputStreamBinaryData) {
long effectiveContentLength = getRequestContentLength(bodyContent, headers);
return new OkHttpInputStreamRequestBody((InputStreamBinaryData) bodyContent, effectiveContentLength,
mediaType);
} else if (bodyContent instanceof FileBinaryData) {
long effectiveContentLength = getRequestContentLength(bodyContent, headers);
return new OkHttpFileRequestBody((FileBinaryData) bodyContent, effectiveContentLength, mediaType);
} else {
return RequestBody.create(bodyContent.toBytes(), mediaType);
}
}
/**
 * Determines the content length to use for the request body. Prefers the length
 * reported by the content itself, then the Content-Length header, falling back
 * to -1 (unknown length) when neither is available.
 *
 * @param content the request body content.
 * @param headers the headers associated with the request.
 * @return the effective content length, or -1 when unknown.
 */
private static long getRequestContentLength(BinaryData content, Headers headers) {
    Long knownLength = content.getLength();
    if (knownLength != null) {
        return knownLength;
    }
    String headerValue = headers.getValue(HeaderName.CONTENT_LENGTH);
    return (headerValue == null) ? -1L : Long.parseLong(headerValue);
}
/**
 * Routes the OkHttp response either through server-sent-event handling (for
 * 'text/event-stream' responses) or through regular response processing.
 *
 * @throws IOException if processing the regular response fails.
 */
private Response<?> toResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
    boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
    if (isTextEventStream(response.headers())) {
        return processServerSentEvent(request, response, eagerlyConvertHeaders);
    }
    return processResponse(request, response, eagerlyReadResponse, ignoreResponseBody, eagerlyConvertHeaders);
}
/**
 * Handles a 'text/event-stream' response by reading server-sent events from the
 * response body and dispatching them to the listener registered on the request.
 *
 * If processing the buffer yields a retry result that the listener does not
 * suppress and the current thread has not been interrupted, the original request
 * is re-sent. NOTE(review): the retried call's response is discarded and the
 * original response (with an empty body) is returned — confirm this is the
 * intended retry semantics.
 *
 * @param request the original request whose listener receives the events.
 * @param response the OkHttp response carrying the event stream.
 * @param eagerlyConvertHeaders whether response headers are converted eagerly.
 * @return the response wrapped with an empty body (the stream is consumed here).
 * @throws UncheckedIOException if reading the event stream fails.
 */
private OkHttpResponse processServerSentEvent(HttpRequest request, okhttp3.Response response,
boolean eagerlyConvertHeaders) {
ServerSentEventListener listener = request.getServerSentEventListener();
if (listener != null && response.body() != null) {
// Consume the event stream as UTF-8 text; closing the reader also closes the body.
try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.body().byteStream(),
StandardCharsets.UTF_8))) {
RetrySSEResult retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null && !retryExceptionForSSE(retrySSEResult, listener, request)
&& !Thread.currentThread().isInterrupted()) {
// Re-issue the request; its response is intentionally not returned (see Javadoc note).
this.send(request);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
} else {
// No listener (or no body) to deliver events to — log and fall through with an empty body.
LOGGER.atInfo().log(() -> NO_LISTENER_LOG_MESSAGE);
}
return new OkHttpResponse(response, request, eagerlyConvertHeaders, EMPTY_BODY);
}
/**
 * Builds the response, reading the body eagerly when requested. When the body is
 * read (or ignored) eagerly, the OkHttp response body is fully consumed and
 * closed here; otherwise it is left open for lazy consumption by the caller.
 *
 * @throws IOException if eagerly reading the response body fails.
 */
private Response<?> processResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
    boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
    if (!eagerlyReadResponse && !ignoreResponseBody) {
        // Lazy mode: hand the still-open body to the response wrapper.
        return new OkHttpResponse(response, request, eagerlyConvertHeaders, null);
    }
    try (ResponseBody body = response.body()) {
        byte[] bytes = (body == null) ? EMPTY_BODY : body.bytes();
        return new OkHttpResponse(response, request, eagerlyConvertHeaders, bytes);
    }
}
} | class OkHttpHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpHttpClient.class);
// Shared "no body" singletons; avoids allocating an empty array/body per request.
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
// The wrapped OkHttp client; package-private — presumably for test access, TODO confirm.
final OkHttpClient httpClient;
/**
 * Creates an HttpClient implementation that delegates all sends to the given OkHttp client.
 *
 * @param httpClient the underlying OkHttp client used to execute calls.
 */
OkHttpHttpClient(OkHttpClient httpClient) {
this.httpClient = httpClient;
}
/**
 * Sends the given request synchronously through the underlying OkHttp client.
 *
 * @param request the request to send.
 * @return the resulting response.
 * @throws UncheckedIOException if executing the call fails with an IOException.
 */
@Override
public Response<?> send(HttpRequest request) {
    // Snapshot the response-handling flags from the request metadata up front.
    boolean convertHeadersEagerly = request.getMetadata().isEagerlyConvertHeaders();
    boolean readBodyEagerly = request.getMetadata().isEagerlyReadResponse();
    boolean skipBody = request.getMetadata().isIgnoreResponseBody();
    Request okHttpRequest = toOkHttpRequest(request);
    try {
        okhttp3.Response rawResponse = httpClient.newCall(okHttpRequest).execute();
        return toResponse(request, rawResponse, readBodyEagerly, skipBody, convertHeadersEagerly);
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Converts the given generic-core request into an OkHttp request.
 *
 * @param request the generic-core request.
 *
 * @return The OkHttp request.
 */
private okhttp3.Request toOkHttpRequest(HttpRequest request) {
    Request.Builder builder = new Request.Builder().url(request.getUrl());
    if (request.getHeaders() != null) {
        // Flatten multi-valued headers into repeated OkHttp header entries.
        for (HttpHeader header : request.getHeaders()) {
            for (String value : header.getValues()) {
                builder.addHeader(header.getName(), value);
            }
        }
    }
    HttpMethod method = request.getHttpMethod();
    if (method == HttpMethod.GET) {
        return builder.get().build();
    }
    if (method == HttpMethod.HEAD) {
        return builder.head().build();
    }
    // All other methods carry a (possibly empty) request body.
    return builder.method(method.toString(), toOkHttpRequestBody(request.getBody(), request.getHeaders())).build();
}
/**
 * Creates an okhttp3.RequestBody from the given BinaryData content.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 *
 * @return The OkHttp request body.
 */
private RequestBody toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    if (bodyContent == null) {
        return EMPTY_REQUEST_BODY;
    }
    String contentType = headers.getValue(HttpHeaderName.CONTENT_TYPE);
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
    // Stream- and file-backed content is handed off without buffering the whole payload.
    if (bodyContent instanceof InputStreamBinaryData) {
        return new OkHttpInputStreamRequestBody((InputStreamBinaryData) bodyContent,
            getRequestContentLength(bodyContent, headers), mediaType);
    }
    if (bodyContent instanceof FileBinaryData) {
        return new OkHttpFileRequestBody((FileBinaryData) bodyContent,
            getRequestContentLength(bodyContent, headers), mediaType);
    }
    // Everything else is materialized to a byte array.
    return RequestBody.create(bodyContent.toBytes(), mediaType);
}
/**
 * Determines the content length to use for the request body. Prefers the length
 * reported by the content itself, then the Content-Length header, falling back
 * to -1 (unknown length) when neither is available.
 *
 * @param content the request body content.
 * @param headers the headers associated with the request.
 * @return the effective content length, or -1 when unknown.
 */
private static long getRequestContentLength(BinaryData content, HttpHeaders headers) {
    Long knownLength = content.getLength();
    if (knownLength != null) {
        return knownLength;
    }
    String headerValue = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    return (headerValue == null) ? -1L : Long.parseLong(headerValue);
}
/**
 * Routes the OkHttp response either through server-sent-event handling (for
 * 'text/event-stream' responses with a body) or through regular response
 * processing.
 *
 * @throws IOException if processing the regular response fails.
 * @throws RuntimeException if an event-stream response arrives without a
 * registered ServerSentEventListener.
 */
private Response<?> toResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
    boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
    if (!isTextEventStream(response.headers()) || response.body() == null) {
        return processResponse(request, response, eagerlyReadResponse, ignoreResponseBody, eagerlyConvertHeaders);
    }
    ServerSentEventListener listener = request.getServerSentEventListener();
    if (listener == null) {
        throw LOGGER.logThrowableAsError(new RuntimeException(ServerSentEventUtil.NO_LISTENER_ERROR_MESSAGE));
    }
    // The event stream is fully consumed here, so the returned response carries an empty body.
    processTextEventStream(request, this::send, response.body().byteStream(), listener, LOGGER);
    return new OkHttpResponse(response, request, eagerlyConvertHeaders, EMPTY_BODY);
}
/**
 * Builds the response, reading the body eagerly when requested. When the body is
 * read (or ignored) eagerly, the OkHttp response body is fully consumed and
 * closed here; otherwise it is left open for lazy consumption by the caller.
 *
 * @throws IOException if eagerly reading the response body fails.
 */
private Response<?> processResponse(HttpRequest request, okhttp3.Response response, boolean eagerlyReadResponse,
    boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
    if (!eagerlyReadResponse && !ignoreResponseBody) {
        // Lazy mode: hand the still-open body to the response wrapper.
        return new OkHttpResponse(response, request, eagerlyConvertHeaders, null);
    }
    try (ResponseBody body = response.body()) {
        byte[] bytes = (body == null) ? EMPTY_BODY : body.bytes();
        return new OkHttpResponse(response, request, eagerlyConvertHeaders, bytes);
    }
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.